diff --git a/.claude/launch.json b/.claude/launch.json new file mode 100644 index 000000000..2c27cd4e5 --- /dev/null +++ b/.claude/launch.json @@ -0,0 +1,65 @@ +{ + "version": "0.0.1", + "configurations": [ + { + "name": "devcon", + "runtimeExecutable": "pnpm", + "runtimeArgs": ["--filter", "devcon-website", "dev"], + "port": 3000 + }, + { + "name": "devcon-app", + "runtimeExecutable": "pnpm", + "runtimeArgs": ["--filter", "devcon-app", "dev"], + "port": 3000 + }, + { + "name": "devcon-api", + "runtimeExecutable": "pnpm", + "runtimeArgs": ["--filter", "devcon-api", "dev"], + "port": 4000 + }, + { + "name": "devcon-ai", + "runtimeExecutable": "pnpm", + "runtimeArgs": ["--filter", "devcon-ai", "dev"], + "port": 3001 + }, + { + "name": "devconnect", + "runtimeExecutable": "pnpm", + "runtimeArgs": ["--filter", "devconnect", "dev"], + "port": 3000 + }, + { + "name": "devconnect-app", + "runtimeExecutable": "pnpm", + "runtimeArgs": ["--filter", "devconnect-app", "dev"], + "port": 3000 + }, + { + "name": "archive", + "runtimeExecutable": "pnpm", + "runtimeArgs": ["--filter", "archive", "dev"], + "port": 3000 + }, + { + "name": "perks-portal", + "runtimeExecutable": "pnpm", + "runtimeArgs": ["--filter", "perks-portal", "dev"], + "port": 3000 + }, + { + "name": "event-app", + "runtimeExecutable": "pnpm", + "runtimeArgs": ["--filter", "event-app", "dev"], + "port": 3000 + }, + { + "name": "social-ticket", + "runtimeExecutable": "pnpm", + "runtimeArgs": ["--filter", "social-ticket", "dev"], + "port": 3000 + } + ] +} diff --git a/.cursor/rules/devconnect-app.mdc b/.cursor/rules/devconnect-app.mdc deleted file mode 100644 index 704a1403b..000000000 --- a/.cursor/rules/devconnect-app.mdc +++ /dev/null @@ -1,35 +0,0 @@ ---- -description: Rules specific to the devconnect-app project -globs: - - "devconnect-app/**/*" ---- - -General Guidelines: - -- Don't write docs unless asked -- When asked to write doc, keep it very minimalist -- Always focus on making small smart changes and 
reuse code when possible - -Icon Libraries: - -- Always prefer Material Design Icons (@mdi/react + @mdi/js) for icon usage -- Import icons from @mdi/js: `import { mdiIconName } from '@mdi/js'` -- Use Icon component from @mdi/react: `import Icon from '@mdi/react'` -- Example usage: `` -- Browse icons at: -- Icon names use PascalCase with 'mdi' prefix (e.g., 'arrow-left' → `mdiArrowLeft`) -- Only import icons you need (tree-shakeable) -- Fallback to lucide-react only if MDI doesn't have the needed icon - -Custom Images/Icons: - -- For custom SVG icons not available in MDI, place them in `devconnect-app/public/images/` -- Download custom icons using curl from the local asset server: - - ```bash - cd devconnect-app/public/images/icons - curl -o icon-name.svg "http://localhost:3845/assets/{hash}.svg" - ``` - -- Example: `curl -o book-quest-info.svg "http://localhost:3845/assets/0701a8f6ec9ddf685dd525802711649034fc87ae.svg"` -- Reference in code: `/images/image-name.svg` diff --git a/.cursor/rules/shared.mdc b/.cursor/rules/shared.mdc deleted file mode 100644 index a36b18163..000000000 --- a/.cursor/rules/shared.mdc +++ /dev/null @@ -1,39 +0,0 @@ ---- -description: -globs: ---- - -General guidelines: - -- Plan and confirm with the user before executing -- Stay focused. Do not "go rogue", if you want to fix things that weren't asked, ask the user first. 
- -TypeScript Conventions: - -- Use interfaces over types when possible -- Avoid enums, use const objects instead - -Code Style: - -- Use functional programming patterns -- Avoid side effects where possible -- Use small, reusable pure functions -- Implement proper error handling -- Use meaningful variable names -- Keep functions small and focused -- Write self-documenting code - -Figma Asset Export: - -- When working with Figma designs, use the generated asset URLs from the Figma code tool -- Export assets directly from Figma when localhost URLs are not accessible -- Use this command pattern for downloading Figma assets: - - ```bash - cd /path/to/public/images && \ - curl -o asset-name.png "http://localhost:3845/assets/[asset-id].png" - ``` - -- For direct Figma export: Select asset → Right-click → Export → Choose format → Save to public/images/ -- Name assets descriptively: `provider-logo.png`, `icon-name.svg`, etc. -- Always verify asset accessibility before implementing in code diff --git a/.github/workflows/devcon.yml b/.github/workflows/devcon.yml index 1105f3ff3..a0fd3b0ce 100644 --- a/.github/workflows/devcon.yml +++ b/.github/workflows/devcon.yml @@ -19,15 +19,22 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 + + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + version: 10.13.1 + run_install: false - name: Use Node.js - uses: actions/setup-node@v1 + uses: actions/setup-node@v4 with: - node-version: "18.x" + node-version: "20.x" + cache: "pnpm" - name: Install dependencies - run: yarn + run: pnpm install --frozen-lockfile - name: Run Eslint // Lint - run: yarn lint + run: pnpm exec eslint . 
--quiet diff --git a/.gitignore b/.gitignore index 74beb9229..9b1306bef 100644 --- a/.gitignore +++ b/.gitignore @@ -65,3 +65,6 @@ devcon/analyze/** # Local Netlify folder .netlify +# Claude +.claude/settings.local.json + diff --git a/devcon-api/data/whitelisted-domains.csv b/devcon-api/data/whitelisted-domains.csv new file mode 100644 index 000000000..437e8479a --- /dev/null +++ b/devcon-api/data/whitelisted-domains.csv @@ -0,0 +1,3 @@ +domain +ethereum.org +university.edu diff --git a/devcon-api/src/controllers/whitelist.ts b/devcon-api/src/controllers/whitelist.ts new file mode 100644 index 000000000..b4eba1aa5 --- /dev/null +++ b/devcon-api/src/controllers/whitelist.ts @@ -0,0 +1,91 @@ +import { Request, Response, Router } from 'express' +import fs from 'fs' +import { join } from 'path' +import { SignJWT, jwtVerify } from 'jose' +import { parseCSV } from '@/utils/files' +import { sendMail } from '@/services/email' +import { CONFIG, SERVER_CONFIG } from '@/utils/config' + +export const whitelistRouter = Router() + +whitelistRouter.post('/whitelist/send-form-link', sendFormLink) +whitelistRouter.get('/whitelist/verify', verifyToken) + +async function sendFormLink(req: Request, res: Response) { + const { email } = req.body + + if (!email || typeof email !== 'string') { + return res.status(400).send({ status: 400, message: 'Email is required' }) + } + + const domain = email.split('@')[1]?.toLowerCase() + if (!domain) { + return res.status(400).send({ status: 400, message: 'Invalid email format' }) + } + + try { + const csvPath = join(CONFIG.DATA_FOLDER, 'whitelisted-domains.csv') + const csvData = fs.readFileSync(csvPath, 'utf-8') + const rows = await parseCSV(csvData) + const whitelistedDomains = rows.map((row: any) => row.domain?.toLowerCase().trim()) + + const isWhitelisted = whitelistedDomains.includes(domain) + + if (!isWhitelisted) { + // Not whitelisted — return the form URL directly (no token), they can still apply + const formUrl = 
SERVER_CONFIG.WHITELIST_FORM_URL + return res.status(200).send({ status: 200, message: 'Form link ready', data: { formUrl, whitelisted: false } }) + } + + // Whitelisted — sign a JWT and email the form link with the token pre-filled + const secret = new TextEncoder().encode(SERVER_CONFIG.WHITELIST_JWT_SECRET) + const token = await new SignJWT({ email, domain }) + .setProtectedHeader({ alg: 'HS256' }) + .setExpirationTime('7d') + .setIssuedAt() + .sign(secret) + + const formUrl = `${SERVER_CONFIG.WHITELIST_FORM_URL}?usp=pp_url&${SERVER_CONFIG.WHITELIST_FORM_TOKEN_FIELD}=${token}` + + if (SERVER_CONFIG.NODE_ENV === 'development') { + // Dev mode — skip email, return the form URL directly + return res.status(200).send({ status: 200, message: 'Form link ready (dev mode)', data: { formUrl, whitelisted: true } }) + } + + await sendMail(email, 'email-cta', 'Your Devcon Form Link', { + TITLE: 'Your Devcon Form Link', + DESCRIPTION: 'Click the button below to access the form.', + CALL_TO_ACTION: 'Open Form', + URL: formUrl, + }) + + return res.status(200).send({ status: 200, message: 'Form link sent', data: { whitelisted: true } }) + } catch (error) { + console.error('Error in send-form-link:', error) + return res.status(500).send({ status: 500, message: 'Internal server error' }) + } +} + +async function verifyToken(req: Request, res: Response) { + const { token } = req.query + + if (!token || typeof token !== 'string') { + return res.status(400).send({ status: 400, data: { whitelisted: false }, message: 'Token is required' }) + } + + try { + const secret = new TextEncoder().encode(SERVER_CONFIG.WHITELIST_JWT_SECRET) + const { payload } = await jwtVerify(token, secret) + + return res.status(200).send({ + status: 200, + data: { + whitelisted: true, + email: payload.email, + domain: payload.domain, + }, + }) + } catch (error) { + return res.status(401).send({ status: 401, data: { whitelisted: false }, message: 'Invalid or expired token' }) + } +} diff --git 
a/devcon-api/src/routes.ts b/devcon-api/src/routes.ts index c09c1d3a7..2be3a9d30 100644 --- a/devcon-api/src/routes.ts +++ b/devcon-api/src/routes.ts @@ -7,6 +7,7 @@ import { aiRouter } from '@/controllers/ai' import { atSlurperRouter } from '@/controllers/at-slurper' import { destinoRouter } from '@/controllers/destino' import { hooksRouter } from '@/controllers/hooks' +import { whitelistRouter } from '@/controllers/whitelist' import { Router } from 'express' import { API_INFO } from '@/utils/config' @@ -27,3 +28,4 @@ router.use(aiRouter) router.use(atSlurperRouter) router.use(destinoRouter) router.use(hooksRouter) +router.use(whitelistRouter) diff --git a/devcon-api/src/utils/config.ts b/devcon-api/src/utils/config.ts index 23b95e41a..11a731777 100644 --- a/devcon-api/src/utils/config.ts +++ b/devcon-api/src/utils/config.ts @@ -51,6 +51,10 @@ export const SERVER_CONFIG = { SMTP_PASSWORD: process.env.SMTP_PASSWORD, ACCREDITATION_GUIDE_URL: process.env.ACCREDITATION_GUIDE_URL || '', + + WHITELIST_JWT_SECRET: process.env.WHITELIST_JWT_SECRET || '', + WHITELIST_FORM_URL: process.env.WHITELIST_FORM_URL || '', + WHITELIST_FORM_TOKEN_FIELD: process.env.WHITELIST_FORM_TOKEN_FIELD || 'entry.123456789', } export const PRETALX_CONFIG = { diff --git a/devcon/.agents/skills/find-skills/SKILL.md b/devcon/.agents/skills/find-skills/SKILL.md new file mode 100644 index 000000000..c797184ee --- /dev/null +++ b/devcon/.agents/skills/find-skills/SKILL.md @@ -0,0 +1,133 @@ +--- +name: find-skills +description: Helps users discover and install agent skills when they ask questions like "how do I do X", "find a skill for X", "is there a skill that can...", or express interest in extending capabilities. This skill should be used when the user is looking for functionality that might exist as an installable skill. +--- + +# Find Skills + +This skill helps you discover and install skills from the open agent skills ecosystem. 
+ +## When to Use This Skill + +Use this skill when the user: + +- Asks "how do I do X" where X might be a common task with an existing skill +- Says "find a skill for X" or "is there a skill for X" +- Asks "can you do X" where X is a specialized capability +- Expresses interest in extending agent capabilities +- Wants to search for tools, templates, or workflows +- Mentions they wish they had help with a specific domain (design, testing, deployment, etc.) + +## What is the Skills CLI? + +The Skills CLI (`npx skills`) is the package manager for the open agent skills ecosystem. Skills are modular packages that extend agent capabilities with specialized knowledge, workflows, and tools. + +**Key commands:** + +- `npx skills find [query]` - Search for skills interactively or by keyword +- `npx skills add ` - Install a skill from GitHub or other sources +- `npx skills check` - Check for skill updates +- `npx skills update` - Update all installed skills + +**Browse skills at:** https://skills.sh/ + +## How to Help Users Find Skills + +### Step 1: Understand What They Need + +When a user asks for help with something, identify: + +1. The domain (e.g., React, testing, design, deployment) +2. The specific task (e.g., writing tests, creating animations, reviewing PRs) +3. Whether this is a common enough task that a skill likely exists + +### Step 2: Search for Skills + +Run the find command with a relevant query: + +```bash +npx skills find [query] +``` + +For example: + +- User asks "how do I make my React app faster?" → `npx skills find react performance` +- User asks "can you help me with PR reviews?" 
→ `npx skills find pr review` +- User asks "I need to create a changelog" → `npx skills find changelog` + +The command will return results like: + +``` +Install with npx skills add + +vercel-labs/agent-skills@vercel-react-best-practices +└ https://skills.sh/vercel-labs/agent-skills/vercel-react-best-practices +``` + +### Step 3: Present Options to the User + +When you find relevant skills, present them to the user with: + +1. The skill name and what it does +2. The install command they can run +3. A link to learn more at skills.sh + +Example response: + +``` +I found a skill that might help! The "vercel-react-best-practices" skill provides +React and Next.js performance optimization guidelines from Vercel Engineering. + +To install it: +npx skills add vercel-labs/agent-skills@vercel-react-best-practices + +Learn more: https://skills.sh/vercel-labs/agent-skills/vercel-react-best-practices +``` + +### Step 4: Offer to Install + +If the user wants to proceed, you can install the skill for them: + +```bash +npx skills add -g -y +``` + +The `-g` flag installs globally (user-level) and `-y` skips confirmation prompts. + +## Common Skill Categories + +When searching, consider these common categories: + +| Category | Example Queries | +| --------------- | ---------------------------------------- | +| Web Development | react, nextjs, typescript, css, tailwind | +| Testing | testing, jest, playwright, e2e | +| DevOps | deploy, docker, kubernetes, ci-cd | +| Documentation | docs, readme, changelog, api-docs | +| Code Quality | review, lint, refactor, best-practices | +| Design | ui, ux, design-system, accessibility | +| Productivity | workflow, automation, git | + +## Tips for Effective Searches + +1. **Use specific keywords**: "react testing" is better than just "testing" +2. **Try alternative terms**: If "deploy" doesn't work, try "deployment" or "ci-cd" +3. 
**Check popular sources**: Many skills come from `vercel-labs/agent-skills` or `ComposioHQ/awesome-claude-skills` + +## When No Skills Are Found + +If no relevant skills exist: + +1. Acknowledge that no existing skill was found +2. Offer to help with the task directly using your general capabilities +3. Suggest the user could create their own skill with `npx skills init` + +Example: + +``` +I searched for skills related to "xyz" but didn't find any matches. +I can still help you with this task directly! Would you like me to proceed? + +If this is something you do often, you could create your own skill: +npx skills init my-xyz-skill +``` diff --git a/devcon/.agents/skills/skill-creator/LICENSE.txt b/devcon/.agents/skills/skill-creator/LICENSE.txt new file mode 100644 index 000000000..7a4a3ea24 --- /dev/null +++ b/devcon/.agents/skills/skill-creator/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/devcon/.agents/skills/skill-creator/SKILL.md b/devcon/.agents/skills/skill-creator/SKILL.md new file mode 100644 index 000000000..942bfe896 --- /dev/null +++ b/devcon/.agents/skills/skill-creator/SKILL.md @@ -0,0 +1,479 @@ +--- +name: skill-creator +description: Create new skills, modify and improve existing skills, and measure skill performance. Use when users want to create a skill from scratch, update or optimize an existing skill, run evals to test a skill, benchmark skill performance with variance analysis, or optimize a skill's description for better triggering accuracy. +--- + +# Skill Creator + +A skill for creating new skills and iteratively improving them. + +At a high level, the process of creating a skill goes like this: + +- Decide what you want the skill to do and roughly how it should do it +- Write a draft of the skill +- Create a few test prompts and run claude-with-access-to-the-skill on them +- Help the user evaluate the results both qualitatively and quantitatively + - While the runs happen in the background, draft some quantitative evals if there aren't any (if there are some, you can either use as is or modify if you feel something needs to change about them). 
Then explain them to the user (or if they already existed, explain the ones that already exist) + - Use the `eval-viewer/generate_review.py` script to show the user the results for them to look at, and also let them look at the quantitative metrics +- Rewrite the skill based on feedback from the user's evaluation of the results (and also if there are any glaring flaws that become apparent from the quantitative benchmarks) +- Repeat until you're satisfied +- Expand the test set and try again at larger scale + +Your job when using this skill is to figure out where the user is in this process and then jump in and help them progress through these stages. So for instance, maybe they're like "I want to make a skill for X". You can help narrow down what they mean, write a draft, write the test cases, figure out how they want to evaluate, run all the prompts, and repeat. + +On the other hand, maybe they already have a draft of the skill. In this case you can go straight to the eval/iterate part of the loop. + +Of course, you should always be flexible and if the user is like "I don't need to run a bunch of evaluations, just vibe with me", you can do that instead. + +Then after the skill is done (but again, the order is flexible), you can also run the skill description improver, which we have a whole separate script for, to optimize the triggering of the skill. + +Cool? Cool. + +## Communicating with the user + +The skill creator is liable to be used by people across a wide range of familiarity with coding jargon. If you haven't heard (and how could you, it's only very recently that it started), there's a trend now where the power of Claude is inspiring plumbers to open up their terminals, parents and grandparents to google "how to install npm". On the other hand, the bulk of users are probably fairly computer-literate. + +So please pay attention to context cues to understand how to phrase your communication! 
In the default case, just to give you some idea: + +- "evaluation" and "benchmark" are borderline, but OK +- for "JSON" and "assertion" you want to see serious cues from the user that they know what those things are before using them without explaining them + +It's OK to briefly explain terms if you're in doubt, and feel free to clarify terms with a short definition if you're unsure if the user will get it. + +--- + +## Creating a skill + +### Capture Intent + +Start by understanding the user's intent. The current conversation might already contain a workflow the user wants to capture (e.g., they say "turn this into a skill"). If so, extract answers from the conversation history first — the tools used, the sequence of steps, corrections the user made, input/output formats observed. The user may need to fill the gaps, and should confirm before proceeding to the next step. + +1. What should this skill enable Claude to do? +2. When should this skill trigger? (what user phrases/contexts) +3. What's the expected output format? +4. Should we set up test cases to verify the skill works? Skills with objectively verifiable outputs (file transforms, data extraction, code generation, fixed workflow steps) benefit from test cases. Skills with subjective outputs (writing style, art) often don't need them. Suggest the appropriate default based on the skill type, but let the user decide. + +### Interview and Research + +Proactively ask questions about edge cases, input/output formats, example files, success criteria, and dependencies. Wait to write test prompts until you've got this part ironed out. + +Check available MCPs - if useful for research (searching docs, finding similar skills, looking up best practices), research in parallel via subagents if available, otherwise inline. Come prepared with context to reduce burden on the user. 
+ +### Write the SKILL.md + +Based on the user interview, fill in these components: + +- **name**: Skill identifier +- **description**: When to trigger, what it does. This is the primary triggering mechanism - include both what the skill does AND specific contexts for when to use it. All "when to use" info goes here, not in the body. Note: currently Claude has a tendency to "undertrigger" skills -- to not use them when they'd be useful. To combat this, please make the skill descriptions a little bit "pushy". So for instance, instead of "How to build a simple fast dashboard to display internal Anthropic data.", you might write "How to build a simple fast dashboard to display internal Anthropic data. Make sure to use this skill whenever the user mentions dashboards, data visualization, internal metrics, or wants to display any kind of company data, even if they don't explicitly ask for a 'dashboard.'" +- **compatibility**: Required tools, dependencies (optional, rarely needed) +- **the rest of the skill :)** + +### Skill Writing Guide + +#### Anatomy of a Skill + +``` +skill-name/ +├── SKILL.md (required) +│ ├── YAML frontmatter (name, description required) +│ └── Markdown instructions +└── Bundled Resources (optional) + ├── scripts/ - Executable code for deterministic/repetitive tasks + ├── references/ - Docs loaded into context as needed + └── assets/ - Files used in output (templates, icons, fonts) +``` + +#### Progressive Disclosure + +Skills use a three-level loading system: +1. **Metadata** (name + description) - Always in context (~100 words) +2. **SKILL.md body** - In context whenever skill triggers (<500 lines ideal) +3. **Bundled resources** - As needed (unlimited, scripts can execute without loading) + +These word counts are approximate and you can feel free to go longer if needed. 
+ +**Key patterns:** +- Keep SKILL.md under 500 lines; if you're approaching this limit, add an additional layer of hierarchy along with clear pointers about where the model using the skill should go next to follow up. +- Reference files clearly from SKILL.md with guidance on when to read them +- For large reference files (>300 lines), include a table of contents + +**Domain organization**: When a skill supports multiple domains/frameworks, organize by variant: +``` +cloud-deploy/ +├── SKILL.md (workflow + selection) +└── references/ + ├── aws.md + ├── gcp.md + └── azure.md +``` +Claude reads only the relevant reference file. + +#### Principle of Lack of Surprise + +This goes without saying, but skills must not contain malware, exploit code, or any content that could compromise system security. A skill's contents should not surprise the user in their intent if described. Don't go along with requests to create misleading skills or skills designed to facilitate unauthorized access, data exfiltration, or other malicious activities. Things like a "roleplay as an XYZ" are OK though. + +#### Writing Patterns + +Prefer using the imperative form in instructions. + +**Defining output formats** - You can do it like this: +```markdown +## Report structure +ALWAYS use this exact template: +# [Title] +## Executive summary +## Key findings +## Recommendations +``` + +**Examples pattern** - It's useful to include examples. You can format them like this (but if "Input" and "Output" are in the examples you might want to deviate a little): +```markdown +## Commit message format +**Example 1:** +Input: Added user authentication with JWT tokens +Output: feat(auth): implement JWT-based authentication +``` + +### Writing Style + +Try to explain to the model why things are important in lieu of heavy-handed musty MUSTs. Use theory of mind and try to make the skill general and not super-narrow to specific examples. 
Start by writing a draft and then look at it with fresh eyes and improve it. + +### Test Cases + +After writing the skill draft, come up with 2-3 realistic test prompts — the kind of thing a real user would actually say. Share them with the user: [you don't have to use this exact language] "Here are a few test cases I'd like to try. Do these look right, or do you want to add more?" Then run them. + +Save test cases to `evals/evals.json`. Don't write assertions yet — just the prompts. You'll draft assertions in the next step while the runs are in progress. + +```json +{ + "skill_name": "example-skill", + "evals": [ + { + "id": 1, + "prompt": "User's task prompt", + "expected_output": "Description of expected result", + "files": [] + } + ] +} +``` + +See `references/schemas.md` for the full schema (including the `assertions` field, which you'll add later). + +## Running and evaluating test cases + +This section is one continuous sequence — don't stop partway through. Do NOT use `/skill-test` or any other testing skill. + +Put results in `-workspace/` as a sibling to the skill directory. Within the workspace, organize results by iteration (`iteration-1/`, `iteration-2/`, etc.) and within that, each test case gets a directory (`eval-0/`, `eval-1/`, etc.). Don't create all of this upfront — just create directories as you go. + +### Step 1: Spawn all runs (with-skill AND baseline) in the same turn + +For each test case, spawn two subagents in the same turn — one with the skill, one without. This is important: don't spawn the with-skill runs first and then come back for baselines later. Launch everything at once so it all finishes around the same time. + +**With-skill run:** + +``` +Execute this task: +- Skill path: +- Task: +- Input files: +- Save outputs to: /iteration-/eval-/with_skill/outputs/ +- Outputs to save: +``` + +**Baseline run** (same prompt, but the baseline depends on context): +- **Creating a new skill**: no skill at all. 
Same prompt, no skill path, save to `without_skill/outputs/`. +- **Improving an existing skill**: the old version. Before editing, snapshot the skill (`cp -r /skill-snapshot/`), then point the baseline subagent at the snapshot. Save to `old_skill/outputs/`. + +Write an `eval_metadata.json` for each test case (assertions can be empty for now). Give each eval a descriptive name based on what it's testing — not just "eval-0". Use this name for the directory too. If this iteration uses new or modified eval prompts, create these files for each new eval directory — don't assume they carry over from previous iterations. + +```json +{ + "eval_id": 0, + "eval_name": "descriptive-name-here", + "prompt": "The user's task prompt", + "assertions": [] +} +``` + +### Step 2: While runs are in progress, draft assertions + +Don't just wait for the runs to finish — you can use this time productively. Draft quantitative assertions for each test case and explain them to the user. If assertions already exist in `evals/evals.json`, review them and explain what they check. + +Good assertions are objectively verifiable and have descriptive names — they should read clearly in the benchmark viewer so someone glancing at the results immediately understands what each one checks. Subjective skills (writing style, design quality) are better evaluated qualitatively — don't force assertions onto things that need human judgment. + +Update the `eval_metadata.json` files and `evals/evals.json` with the assertions once drafted. Also explain to the user what they'll see in the viewer — both the qualitative outputs and the quantitative benchmark. + +### Step 3: As runs complete, capture timing data + +When each subagent task completes, you receive a notification containing `total_tokens` and `duration_ms`. 
Save this data immediately to `timing.json` in the run directory: + +```json +{ + "total_tokens": 84852, + "duration_ms": 23332, + "total_duration_seconds": 23.3 +} +``` + +This is the only opportunity to capture this data — it comes through the task notification and isn't persisted elsewhere. Process each notification as it arrives rather than trying to batch them. + +### Step 4: Grade, aggregate, and launch the viewer + +Once all runs are done: + +1. **Grade each run** — spawn a grader subagent (or grade inline) that reads `agents/grader.md` and evaluates each assertion against the outputs. Save results to `grading.json` in each run directory. The grading.json expectations array must use the fields `text`, `passed`, and `evidence` (not `name`/`met`/`details` or other variants) — the viewer depends on these exact field names. For assertions that can be checked programmatically, write and run a script rather than eyeballing it — scripts are faster, more reliable, and can be reused across iterations. + +2. **Aggregate into benchmark** — run the aggregation script from the skill-creator directory: + ```bash + python -m scripts.aggregate_benchmark /iteration-N --skill-name + ``` + This produces `benchmark.json` and `benchmark.md` with pass_rate, time, and tokens for each configuration, with mean ± stddev and the delta. If generating benchmark.json manually, see `references/schemas.md` for the exact schema the viewer expects. +Put each with_skill version before its baseline counterpart. + +3. **Do an analyst pass** — read the benchmark data and surface patterns the aggregate stats might hide. See `agents/analyzer.md` (the "Analyzing Benchmark Results" section) for what to look for — things like assertions that always pass regardless of skill (non-discriminating), high-variance evals (possibly flaky), and time/token tradeoffs. + +4. 
**Launch the viewer** with both qualitative outputs and quantitative data: + ```bash + nohup python /eval-viewer/generate_review.py \ + /iteration-N \ + --skill-name "my-skill" \ + --benchmark /iteration-N/benchmark.json \ + > /dev/null 2>&1 & + VIEWER_PID=$! + ``` + For iteration 2+, also pass `--previous-workspace /iteration-`. + + **Cowork / headless environments:** If `webbrowser.open()` is not available or the environment has no display, use `--static ` to write a standalone HTML file instead of starting a server. Feedback will be downloaded as a `feedback.json` file when the user clicks "Submit All Reviews". After download, copy `feedback.json` into the workspace directory for the next iteration to pick up. + +Note: please use generate_review.py to create the viewer; there's no need to write custom HTML. + +5. **Tell the user** something like: "I've opened the results in your browser. There are two tabs — 'Outputs' lets you click through each test case and leave feedback, 'Benchmark' shows the quantitative comparison. When you're done, come back here and let me know." + +### What the user sees in the viewer + +The "Outputs" tab shows one test case at a time: +- **Prompt**: the task that was given +- **Output**: the files the skill produced, rendered inline where possible +- **Previous Output** (iteration 2+): collapsed section showing last iteration's output +- **Formal Grades** (if grading was run): collapsed section showing assertion pass/fail +- **Feedback**: a textbox that auto-saves as they type +- **Previous Feedback** (iteration 2+): their comments from last time, shown below the textbox + +The "Benchmark" tab shows the stats summary: pass rates, timing, and token usage for each configuration, with per-eval breakdowns and analyst observations. + +Navigation is via prev/next buttons or arrow keys. When done, they click "Submit All Reviews" which saves all feedback to `feedback.json`. 
+ +### Step 5: Read the feedback + +When the user tells you they're done, read `feedback.json`: + +```json +{ + "reviews": [ + {"run_id": "eval-0-with_skill", "feedback": "the chart is missing axis labels", "timestamp": "..."}, + {"run_id": "eval-1-with_skill", "feedback": "", "timestamp": "..."}, + {"run_id": "eval-2-with_skill", "feedback": "perfect, love this", "timestamp": "..."} + ], + "status": "complete" +} +``` + +Empty feedback means the user thought it was fine. Focus your improvements on the test cases where the user had specific complaints. + +Kill the viewer server when you're done with it: + +```bash +kill $VIEWER_PID 2>/dev/null +``` + +--- + +## Improving the skill + +This is the heart of the loop. You've run the test cases, the user has reviewed the results, and now you need to make the skill better based on their feedback. + +### How to think about improvements + +1. **Generalize from the feedback.** The big picture thing that's happening here is that we're trying to create skills that can be used a million times (maybe literally, maybe even more who knows) across many different prompts. Here you and the user are iterating on only a few examples over and over again because it helps move faster. The user knows these examples in and out and it's quick for them to assess new outputs. But if the skill you and the user are codeveloping works only for those examples, it's useless. Rather than put in fiddly overfitty changes, or oppressively constrictive MUSTs, if there's some stubborn issue, you might try branching out and using different metaphors, or recommending different patterns of working. It's relatively cheap to try and maybe you'll land on something great. + +2. **Keep the prompt lean.** Remove things that aren't pulling their weight. 
Make sure to read the transcripts, not just the final outputs — if it looks like the skill is making the model waste a bunch of time doing things that are unproductive, you can try getting rid of the parts of the skill that are making it do that and seeing what happens. + +3. **Explain the why.** Try hard to explain the **why** behind everything you're asking the model to do. Today's LLMs are *smart*. They have good theory of mind and when given a good harness can go beyond rote instructions and really make things happen. Even if the feedback from the user is terse or frustrated, try to actually understand the task and why the user is writing what they wrote, and what they actually wrote, and then transmit this understanding into the instructions. If you find yourself writing ALWAYS or NEVER in all caps, or using super rigid structures, that's a yellow flag — if possible, reframe and explain the reasoning so that the model understands why the thing you're asking for is important. That's a more humane, powerful, and effective approach. + +4. **Look for repeated work across test cases.** Read the transcripts from the test runs and notice if the subagents all independently wrote similar helper scripts or took the same multi-step approach to something. If all 3 test cases resulted in the subagent writing a `create_docx.py` or a `build_chart.py`, that's a strong signal the skill should bundle that script. Write it once, put it in `scripts/`, and tell the skill to use it. This saves every future invocation from reinventing the wheel. + +This task is pretty important (we are trying to create billions a year in economic value here!) and your thinking time is not the blocker; take your time and really mull things over. I'd suggest writing a draft revision and then looking at it anew and making improvements. Really do your best to get into the head of the user and understand what they want and need. + +### The iteration loop + +After improving the skill: + +1. 
Apply your improvements to the skill +2. Rerun all test cases into a new `iteration-/` directory, including baseline runs. If you're creating a new skill, the baseline is always `without_skill` (no skill) — that stays the same across iterations. If you're improving an existing skill, use your judgment on what makes sense as the baseline: the original version the user came in with, or the previous iteration. +3. Launch the reviewer with `--previous-workspace` pointing at the previous iteration +4. Wait for the user to review and tell you they're done +5. Read the new feedback, improve again, repeat + +Keep going until: +- The user says they're happy +- The feedback is all empty (everything looks good) +- You're not making meaningful progress + +--- + +## Advanced: Blind comparison + +For situations where you want a more rigorous comparison between two versions of a skill (e.g., the user asks "is the new version actually better?"), there's a blind comparison system. Read `agents/comparator.md` and `agents/analyzer.md` for the details. The basic idea is: give two outputs to an independent agent without telling it which is which, and let it judge quality. Then analyze why the winner won. + +This is optional, requires subagents, and most users won't need it. The human review loop is usually sufficient. + +--- + +## Description Optimization + +The description field in SKILL.md frontmatter is the primary mechanism that determines whether Claude invokes a skill. After creating or improving a skill, offer to optimize the description for better triggering accuracy. + +### Step 1: Generate trigger eval queries + +Create 20 eval queries — a mix of should-trigger and should-not-trigger. Save as JSON: + +```json +[ + {"query": "the user prompt", "should_trigger": true}, + {"query": "another prompt", "should_trigger": false} +] +``` + +The queries must be realistic and something a Claude Code or Claude.ai user would actually type. 
Not abstract requests, but requests that are concrete and specific and have a good amount of detail. For instance, file paths, personal context about the user's job or situation, column names and values, company names, URLs. A little bit of backstory. Some might be in lowercase or contain abbreviations or typos or casual speech. Use a mix of different lengths, and focus on edge cases rather than making them clear-cut (the user will get a chance to sign off on them). + +Bad: `"Format this data"`, `"Extract text from PDF"`, `"Create a chart"` + +Good: `"ok so my boss just sent me this xlsx file (its in my downloads, called something like 'Q4 sales final FINAL v2.xlsx') and she wants me to add a column that shows the profit margin as a percentage. The revenue is in column C and costs are in column D i think"` + +For the **should-trigger** queries (8-10), think about coverage. You want different phrasings of the same intent — some formal, some casual. Include cases where the user doesn't explicitly name the skill or file type but clearly needs it. Throw in some uncommon use cases and cases where this skill competes with another but should win. + +For the **should-not-trigger** queries (8-10), the most valuable ones are the near-misses — queries that share keywords or concepts with the skill but actually need something different. Think adjacent domains, ambiguous phrasing where a naive keyword match would trigger but shouldn't, and cases where the query touches on something the skill does but in a context where another tool is more appropriate. + +The key thing to avoid: don't make should-not-trigger queries obviously irrelevant. "Write a fibonacci function" as a negative test for a PDF skill is too easy — it doesn't test anything. The negative cases should be genuinely tricky. + +### Step 2: Review with user + +Present the eval set to the user for review using the HTML template: + +1. Read the template from `assets/eval_review.html` +2. 
Replace the placeholders: + - `__EVAL_DATA_PLACEHOLDER__` → the JSON array of eval items (no quotes around it — it's a JS variable assignment) + - `__SKILL_NAME_PLACEHOLDER__` → the skill's name + - `__SKILL_DESCRIPTION_PLACEHOLDER__` → the skill's current description +3. Write to a temp file (e.g., `/tmp/eval_review_.html`) and open it: `open /tmp/eval_review_.html` +4. The user can edit queries, toggle should-trigger, add/remove entries, then click "Export Eval Set" +5. The file downloads to `~/Downloads/eval_set.json` — check the Downloads folder for the most recent version in case there are multiple (e.g., `eval_set (1).json`) + +This step matters — bad eval queries lead to bad descriptions. + +### Step 3: Run the optimization loop + +Tell the user: "This will take some time — I'll run the optimization loop in the background and check on it periodically." + +Save the eval set to the workspace, then run in the background: + +```bash +python -m scripts.run_loop \ + --eval-set \ + --skill-path \ + --model \ + --max-iterations 5 \ + --verbose +``` + +Use the model ID from your system prompt (the one powering the current session) so the triggering test matches what the user actually experiences. + +While it runs, periodically tail the output to give the user updates on which iteration it's on and what the scores look like. + +This handles the full optimization loop automatically. It splits the eval set into 60% train and 40% held-out test, evaluates the current description (running each query 3 times to get a reliable trigger rate), then calls Claude with extended thinking to propose improvements based on what failed. It re-evaluates each new description on both train and test, iterating up to 5 times. When it's done, it opens an HTML report in the browser showing the results per iteration and returns JSON with `best_description` — selected by test score rather than train score to avoid overfitting. 
+ +### How skill triggering works + +Understanding the triggering mechanism helps design better eval queries. Skills appear in Claude's `available_skills` list with their name + description, and Claude decides whether to consult a skill based on that description. The important thing to know is that Claude only consults skills for tasks it can't easily handle on its own — simple, one-step queries like "read this PDF" may not trigger a skill even if the description matches perfectly, because Claude can handle them directly with basic tools. Complex, multi-step, or specialized queries reliably trigger skills when the description matches. + +This means your eval queries should be substantive enough that Claude would actually benefit from consulting a skill. Simple queries like "read file X" are poor test cases — they won't trigger skills regardless of description quality. + +### Step 4: Apply the result + +Take `best_description` from the JSON output and update the skill's SKILL.md frontmatter. Show the user before/after and report the scores. + +--- + +### Package and Present (only if `present_files` tool is available) + +Check whether you have access to the `present_files` tool. If you don't, skip this step. If you do, package the skill and present the .skill file to the user: + +```bash +python -m scripts.package_skill +``` + +After packaging, direct the user to the resulting `.skill` file path so they can install it. + +--- + +## Claude.ai-specific instructions + +In Claude.ai, the core workflow is the same (draft → test → review → improve → repeat), but because Claude.ai doesn't have subagents, some mechanics change. Here's what to adapt: + +**Running test cases**: No subagents means no parallel execution. For each test case, read the skill's SKILL.md, then follow its instructions to accomplish the test prompt yourself. Do them one at a time. 
This is less rigorous than independent subagents (you wrote the skill and you're also running it, so you have full context), but it's a useful sanity check — and the human review step compensates. Skip the baseline runs — just use the skill to complete the task as requested. + +**Reviewing results**: If you can't open a browser (e.g., Claude.ai's VM has no display, or you're on a remote server), skip the browser reviewer entirely. Instead, present results directly in the conversation. For each test case, show the prompt and the output. If the output is a file the user needs to see (like a .docx or .xlsx), save it to the filesystem and tell them where it is so they can download and inspect it. Ask for feedback inline: "How does this look? Anything you'd change?" + +**Benchmarking**: Skip the quantitative benchmarking — it relies on baseline comparisons which aren't meaningful without subagents. Focus on qualitative feedback from the user. + +**The iteration loop**: Same as before — improve the skill, rerun the test cases, ask for feedback — just without the browser reviewer in the middle. You can still organize results into iteration directories on the filesystem if you have one. + +**Description optimization**: This section requires the `claude` CLI tool (specifically `claude -p`) which is only available in Claude Code. Skip it if you're on Claude.ai. + +**Blind comparison**: Requires subagents. Skip it. + +**Packaging**: The `package_skill.py` script works anywhere with Python and a filesystem. On Claude.ai, you can run it and the user can download the resulting `.skill` file. + +--- + +## Cowork-Specific Instructions + +If you're in Cowork, the main things to know are: + +- You have subagents, so the main workflow (spawn test cases in parallel, run baselines, grade, etc.) all works. (However, if you run into severe problems with timeouts, it's OK to run the test prompts in series rather than parallel.) 
+- You don't have a browser or display, so when generating the eval viewer, use `--static ` to write a standalone HTML file instead of starting a server. Then proffer a link that the user can click to open the HTML in their browser. +- For whatever reason, the Cowork setup seems to disincline Claude from generating the eval viewer after running the tests, so just to reiterate: whether you're in Cowork or in Claude Code, after running tests, you should always generate the eval viewer for the human to look at examples before revising the skill yourself and trying to make corrections, using `generate_review.py` (not writing your own boutique html code). Sorry in advance but I'm gonna go all caps here: GENERATE THE EVAL VIEWER *BEFORE* evaluating inputs yourself. You want to get them in front of the human ASAP! +- Feedback works differently: since there's no running server, the viewer's "Submit All Reviews" button will download `feedback.json` as a file. You can then read it from there (you may have to request access first). +- Packaging works — `package_skill.py` just needs Python and a filesystem. +- Description optimization (`run_loop.py` / `run_eval.py`) should work in Cowork just fine since it uses `claude -p` via subprocess, not a browser, but please save it until you've fully finished making the skill and the user agrees it's in good shape. + +--- + +## Reference files + +The agents/ directory contains instructions for specialized subagents. Read them when you need to spawn the relevant subagent. + +- `agents/grader.md` — How to evaluate assertions against outputs +- `agents/comparator.md` — How to do blind A/B comparison between two outputs +- `agents/analyzer.md` — How to analyze why one version beat another + +The references/ directory has additional documentation: +- `references/schemas.md` — JSON structures for evals.json, grading.json, etc. 
+ +--- + +Repeating one more time the core loop here for emphasis: + +- Figure out what the skill is about +- Draft or edit the skill +- Run claude-with-access-to-the-skill on test prompts +- With the user, evaluate the outputs: + - Create benchmark.json and run `eval-viewer/generate_review.py` to help the user review them + - Run quantitative evals +- Repeat until you and the user are satisfied +- Package the final skill and return it to the user. + +Please add steps to your TodoList, if you have such a thing, to make sure you don't forget. If you're in Cowork, please specifically put "Create evals JSON and run `eval-viewer/generate_review.py` so human can review test cases" in your TodoList to make sure it happens. + +Good luck! diff --git a/devcon/.agents/skills/skill-creator/agents/analyzer.md b/devcon/.agents/skills/skill-creator/agents/analyzer.md new file mode 100644 index 000000000..14e41d606 --- /dev/null +++ b/devcon/.agents/skills/skill-creator/agents/analyzer.md @@ -0,0 +1,274 @@ +# Post-hoc Analyzer Agent + +Analyze blind comparison results to understand WHY the winner won and generate improvement suggestions. + +## Role + +After the blind comparator determines a winner, the Post-hoc Analyzer "unblids" the results by examining the skills and transcripts. The goal is to extract actionable insights: what made the winner better, and how can the loser be improved? 
+ +## Inputs + +You receive these parameters in your prompt: + +- **winner**: "A" or "B" (from blind comparison) +- **winner_skill_path**: Path to the skill that produced the winning output +- **winner_transcript_path**: Path to the execution transcript for the winner +- **loser_skill_path**: Path to the skill that produced the losing output +- **loser_transcript_path**: Path to the execution transcript for the loser +- **comparison_result_path**: Path to the blind comparator's output JSON +- **output_path**: Where to save the analysis results + +## Process + +### Step 1: Read Comparison Result + +1. Read the blind comparator's output at comparison_result_path +2. Note the winning side (A or B), the reasoning, and any scores +3. Understand what the comparator valued in the winning output + +### Step 2: Read Both Skills + +1. Read the winner skill's SKILL.md and key referenced files +2. Read the loser skill's SKILL.md and key referenced files +3. Identify structural differences: + - Instructions clarity and specificity + - Script/tool usage patterns + - Example coverage + - Edge case handling + +### Step 3: Read Both Transcripts + +1. Read the winner's transcript +2. Read the loser's transcript +3. Compare execution patterns: + - How closely did each follow their skill's instructions? + - What tools were used differently? + - Where did the loser diverge from optimal behavior? + - Did either encounter errors or make recovery attempts? + +### Step 4: Analyze Instruction Following + +For each transcript, evaluate: +- Did the agent follow the skill's explicit instructions? +- Did the agent use the skill's provided tools/scripts? +- Were there missed opportunities to leverage skill content? +- Did the agent add unnecessary steps not in the skill? + +Score instruction following 1-10 and note specific issues. + +### Step 5: Identify Winner Strengths + +Determine what made the winner better: +- Clearer instructions that led to better behavior? 
+- Better scripts/tools that produced better output? +- More comprehensive examples that guided edge cases? +- Better error handling guidance? + +Be specific. Quote from skills/transcripts where relevant. + +### Step 6: Identify Loser Weaknesses + +Determine what held the loser back: +- Ambiguous instructions that led to suboptimal choices? +- Missing tools/scripts that forced workarounds? +- Gaps in edge case coverage? +- Poor error handling that caused failures? + +### Step 7: Generate Improvement Suggestions + +Based on the analysis, produce actionable suggestions for improving the loser skill: +- Specific instruction changes to make +- Tools/scripts to add or modify +- Examples to include +- Edge cases to address + +Prioritize by impact. Focus on changes that would have changed the outcome. + +### Step 8: Write Analysis Results + +Save structured analysis to `{output_path}`. + +## Output Format + +Write a JSON file with this structure: + +```json +{ + "comparison_summary": { + "winner": "A", + "winner_skill": "path/to/winner/skill", + "loser_skill": "path/to/loser/skill", + "comparator_reasoning": "Brief summary of why comparator chose winner" + }, + "winner_strengths": [ + "Clear step-by-step instructions for handling multi-page documents", + "Included validation script that caught formatting errors", + "Explicit guidance on fallback behavior when OCR fails" + ], + "loser_weaknesses": [ + "Vague instruction 'process the document appropriately' led to inconsistent behavior", + "No script for validation, agent had to improvise and made errors", + "No guidance on OCR failure, agent gave up instead of trying alternatives" + ], + "instruction_following": { + "winner": { + "score": 9, + "issues": [ + "Minor: skipped optional logging step" + ] + }, + "loser": { + "score": 6, + "issues": [ + "Did not use the skill's formatting template", + "Invented own approach instead of following step 3", + "Missed the 'always validate output' instruction" + ] + } + }, + 
"improvement_suggestions": [ + { + "priority": "high", + "category": "instructions", + "suggestion": "Replace 'process the document appropriately' with explicit steps: 1) Extract text, 2) Identify sections, 3) Format per template", + "expected_impact": "Would eliminate ambiguity that caused inconsistent behavior" + }, + { + "priority": "high", + "category": "tools", + "suggestion": "Add validate_output.py script similar to winner skill's validation approach", + "expected_impact": "Would catch formatting errors before final output" + }, + { + "priority": "medium", + "category": "error_handling", + "suggestion": "Add fallback instructions: 'If OCR fails, try: 1) different resolution, 2) image preprocessing, 3) manual extraction'", + "expected_impact": "Would prevent early failure on difficult documents" + } + ], + "transcript_insights": { + "winner_execution_pattern": "Read skill -> Followed 5-step process -> Used validation script -> Fixed 2 issues -> Produced output", + "loser_execution_pattern": "Read skill -> Unclear on approach -> Tried 3 different methods -> No validation -> Output had errors" + } +} +``` + +## Guidelines + +- **Be specific**: Quote from skills and transcripts, don't just say "instructions were unclear" +- **Be actionable**: Suggestions should be concrete changes, not vague advice +- **Focus on skill improvements**: The goal is to improve the losing skill, not critique the agent +- **Prioritize by impact**: Which changes would most likely have changed the outcome? +- **Consider causation**: Did the skill weakness actually cause the worse output, or is it incidental? +- **Stay objective**: Analyze what happened, don't editorialize +- **Think about generalization**: Would this improvement help on other evals too? 
+ +## Categories for Suggestions + +Use these categories to organize improvement suggestions: + +| Category | Description | +|----------|-------------| +| `instructions` | Changes to the skill's prose instructions | +| `tools` | Scripts, templates, or utilities to add/modify | +| `examples` | Example inputs/outputs to include | +| `error_handling` | Guidance for handling failures | +| `structure` | Reorganization of skill content | +| `references` | External docs or resources to add | + +## Priority Levels + +- **high**: Would likely change the outcome of this comparison +- **medium**: Would improve quality but may not change win/loss +- **low**: Nice to have, marginal improvement + +--- + +# Analyzing Benchmark Results + +When analyzing benchmark results, the analyzer's purpose is to **surface patterns and anomalies** across multiple runs, not suggest skill improvements. + +## Role + +Review all benchmark run results and generate freeform notes that help the user understand skill performance. Focus on patterns that wouldn't be visible from aggregate metrics alone. + +## Inputs + +You receive these parameters in your prompt: + +- **benchmark_data_path**: Path to the in-progress benchmark.json with all run results +- **skill_path**: Path to the skill being benchmarked +- **output_path**: Where to save the notes (as JSON array of strings) + +## Process + +### Step 1: Read Benchmark Data + +1. Read the benchmark.json containing all run results +2. Note the configurations tested (with_skill, without_skill) +3. Understand the run_summary aggregates already calculated + +### Step 2: Analyze Per-Assertion Patterns + +For each expectation across all runs: +- Does it **always pass** in both configurations? (may not differentiate skill value) +- Does it **always fail** in both configurations? (may be broken or beyond capability) +- Does it **always pass with skill but fail without**? (skill clearly adds value here) +- Does it **always fail with skill but pass without**? 
(skill may be hurting) +- Is it **highly variable**? (flaky expectation or non-deterministic behavior) + +### Step 3: Analyze Cross-Eval Patterns + +Look for patterns across evals: +- Are certain eval types consistently harder/easier? +- Do some evals show high variance while others are stable? +- Are there surprising results that contradict expectations? + +### Step 4: Analyze Metrics Patterns + +Look at time_seconds, tokens, tool_calls: +- Does the skill significantly increase execution time? +- Is there high variance in resource usage? +- Are there outlier runs that skew the aggregates? + +### Step 5: Generate Notes + +Write freeform observations as a list of strings. Each note should: +- State a specific observation +- Be grounded in the data (not speculation) +- Help the user understand something the aggregate metrics don't show + +Examples: +- "Assertion 'Output is a PDF file' passes 100% in both configurations - may not differentiate skill value" +- "Eval 3 shows high variance (50% ± 40%) - run 2 had an unusual failure that may be flaky" +- "Without-skill runs consistently fail on table extraction expectations (0% pass rate)" +- "Skill adds 13s average execution time but improves pass rate by 50%" +- "Token usage is 80% higher with skill, primarily due to script output parsing" +- "All 3 without-skill runs for eval 1 produced empty output" + +### Step 6: Write Notes + +Save notes to `{output_path}` as a JSON array of strings: + +```json +[ + "Assertion 'Output is a PDF file' passes 100% in both configurations - may not differentiate skill value", + "Eval 3 shows high variance (50% ± 40%) - run 2 had an unusual failure", + "Without-skill runs consistently fail on table extraction expectations", + "Skill adds 13s average execution time but improves pass rate by 50%" +] +``` + +## Guidelines + +**DO:** +- Report what you observe in the data +- Be specific about which evals, expectations, or runs you're referring to +- Note patterns that aggregate metrics would 
hide +- Provide context that helps interpret the numbers + +**DO NOT:** +- Suggest improvements to the skill (that's for the improvement step, not benchmarking) +- Make subjective quality judgments ("the output was good/bad") +- Speculate about causes without evidence +- Repeat information already in the run_summary aggregates diff --git a/devcon/.agents/skills/skill-creator/agents/comparator.md b/devcon/.agents/skills/skill-creator/agents/comparator.md new file mode 100644 index 000000000..80e00eb45 --- /dev/null +++ b/devcon/.agents/skills/skill-creator/agents/comparator.md @@ -0,0 +1,202 @@ +# Blind Comparator Agent + +Compare two outputs WITHOUT knowing which skill produced them. + +## Role + +The Blind Comparator judges which output better accomplishes the eval task. You receive two outputs labeled A and B, but you do NOT know which skill produced which. This prevents bias toward a particular skill or approach. + +Your judgment is based purely on output quality and task completion. + +## Inputs + +You receive these parameters in your prompt: + +- **output_a_path**: Path to the first output file or directory +- **output_b_path**: Path to the second output file or directory +- **eval_prompt**: The original task/prompt that was executed +- **expectations**: List of expectations to check (optional - may be empty) + +## Process + +### Step 1: Read Both Outputs + +1. Examine output A (file or directory) +2. Examine output B (file or directory) +3. Note the type, structure, and content of each +4. If outputs are directories, examine all relevant files inside + +### Step 2: Understand the Task + +1. Read the eval_prompt carefully +2. Identify what the task requires: + - What should be produced? + - What qualities matter (accuracy, completeness, format)? + - What would distinguish a good output from a poor one? 
+ +### Step 3: Generate Evaluation Rubric + +Based on the task, generate a rubric with two dimensions: + +**Content Rubric** (what the output contains): +| Criterion | 1 (Poor) | 3 (Acceptable) | 5 (Excellent) | +|-----------|----------|----------------|---------------| +| Correctness | Major errors | Minor errors | Fully correct | +| Completeness | Missing key elements | Mostly complete | All elements present | +| Accuracy | Significant inaccuracies | Minor inaccuracies | Accurate throughout | + +**Structure Rubric** (how the output is organized): +| Criterion | 1 (Poor) | 3 (Acceptable) | 5 (Excellent) | +|-----------|----------|----------------|---------------| +| Organization | Disorganized | Reasonably organized | Clear, logical structure | +| Formatting | Inconsistent/broken | Mostly consistent | Professional, polished | +| Usability | Difficult to use | Usable with effort | Easy to use | + +Adapt criteria to the specific task. For example: +- PDF form → "Field alignment", "Text readability", "Data placement" +- Document → "Section structure", "Heading hierarchy", "Paragraph flow" +- Data output → "Schema correctness", "Data types", "Completeness" + +### Step 4: Evaluate Each Output Against the Rubric + +For each output (A and B): + +1. **Score each criterion** on the rubric (1-5 scale) +2. **Calculate dimension totals**: Content score, Structure score +3. **Calculate overall score**: Average of dimension scores, scaled to 1-10 + +### Step 5: Check Assertions (if provided) + +If expectations are provided: + +1. Check each expectation against output A +2. Check each expectation against output B +3. Count pass rates for each output +4. Use expectation scores as secondary evidence (not the primary decision factor) + +### Step 6: Determine the Winner + +Compare A and B based on (in priority order): + +1. **Primary**: Overall rubric score (content + structure) +2. **Secondary**: Assertion pass rates (if applicable) +3. 
**Tiebreaker**: If truly equal, declare a TIE + +Be decisive - ties should be rare. One output is usually better, even if marginally. + +### Step 7: Write Comparison Results + +Save results to a JSON file at the path specified (or `comparison.json` if not specified). + +## Output Format + +Write a JSON file with this structure: + +```json +{ + "winner": "A", + "reasoning": "Output A provides a complete solution with proper formatting and all required fields. Output B is missing the date field and has formatting inconsistencies.", + "rubric": { + "A": { + "content": { + "correctness": 5, + "completeness": 5, + "accuracy": 4 + }, + "structure": { + "organization": 4, + "formatting": 5, + "usability": 4 + }, + "content_score": 4.7, + "structure_score": 4.3, + "overall_score": 9.0 + }, + "B": { + "content": { + "correctness": 3, + "completeness": 2, + "accuracy": 3 + }, + "structure": { + "organization": 3, + "formatting": 2, + "usability": 3 + }, + "content_score": 2.7, + "structure_score": 2.7, + "overall_score": 5.4 + } + }, + "output_quality": { + "A": { + "score": 9, + "strengths": ["Complete solution", "Well-formatted", "All fields present"], + "weaknesses": ["Minor style inconsistency in header"] + }, + "B": { + "score": 5, + "strengths": ["Readable output", "Correct basic structure"], + "weaknesses": ["Missing date field", "Formatting inconsistencies", "Partial data extraction"] + } + }, + "expectation_results": { + "A": { + "passed": 4, + "total": 5, + "pass_rate": 0.80, + "details": [ + {"text": "Output includes name", "passed": true}, + {"text": "Output includes date", "passed": true}, + {"text": "Format is PDF", "passed": true}, + {"text": "Contains signature", "passed": false}, + {"text": "Readable text", "passed": true} + ] + }, + "B": { + "passed": 3, + "total": 5, + "pass_rate": 0.60, + "details": [ + {"text": "Output includes name", "passed": true}, + {"text": "Output includes date", "passed": false}, + {"text": "Format is PDF", "passed": true}, + 
{"text": "Contains signature", "passed": false}, + {"text": "Readable text", "passed": true} + ] + } + } +} +``` + +If no expectations were provided, omit the `expectation_results` field entirely. + +## Field Descriptions + +- **winner**: "A", "B", or "TIE" +- **reasoning**: Clear explanation of why the winner was chosen (or why it's a tie) +- **rubric**: Structured rubric evaluation for each output + - **content**: Scores for content criteria (correctness, completeness, accuracy) + - **structure**: Scores for structure criteria (organization, formatting, usability) + - **content_score**: Average of content criteria (1-5) + - **structure_score**: Average of structure criteria (1-5) + - **overall_score**: Combined score scaled to 1-10 +- **output_quality**: Summary quality assessment + - **score**: 1-10 rating (should match rubric overall_score) + - **strengths**: List of positive aspects + - **weaknesses**: List of issues or shortcomings +- **expectation_results**: (Only if expectations provided) + - **passed**: Number of expectations that passed + - **total**: Total number of expectations + - **pass_rate**: Fraction passed (0.0 to 1.0) + - **details**: Individual expectation results + +## Guidelines + +- **Stay blind**: DO NOT try to infer which skill produced which output. Judge purely on output quality. +- **Be specific**: Cite specific examples when explaining strengths and weaknesses. +- **Be decisive**: Choose a winner unless outputs are genuinely equivalent. +- **Output quality first**: Assertion scores are secondary to overall task completion. +- **Be objective**: Don't favor outputs based on style preferences; focus on correctness and completeness. +- **Explain your reasoning**: The reasoning field should make it clear why you chose the winner. +- **Handle edge cases**: If both outputs fail, pick the one that fails less badly. If both are excellent, pick the one that's marginally better. 
diff --git a/devcon/.agents/skills/skill-creator/agents/grader.md b/devcon/.agents/skills/skill-creator/agents/grader.md new file mode 100644 index 000000000..558ab05c0 --- /dev/null +++ b/devcon/.agents/skills/skill-creator/agents/grader.md @@ -0,0 +1,223 @@ +# Grader Agent + +Evaluate expectations against an execution transcript and outputs. + +## Role + +The Grader reviews a transcript and output files, then determines whether each expectation passes or fails. Provide clear evidence for each judgment. + +You have two jobs: grade the outputs, and critique the evals themselves. A passing grade on a weak assertion is worse than useless — it creates false confidence. When you notice an assertion that's trivially satisfied, or an important outcome that no assertion checks, say so. + +## Inputs + +You receive these parameters in your prompt: + +- **expectations**: List of expectations to evaluate (strings) +- **transcript_path**: Path to the execution transcript (markdown file) +- **outputs_dir**: Directory containing output files from execution + +## Process + +### Step 1: Read the Transcript + +1. Read the transcript file completely +2. Note the eval prompt, execution steps, and final result +3. Identify any issues or errors documented + +### Step 2: Examine Output Files + +1. List files in outputs_dir +2. Read/examine each file relevant to the expectations. If outputs aren't plain text, use the inspection tools provided in your prompt — don't rely solely on what the transcript says the executor produced. +3. Note contents, structure, and quality + +### Step 3: Evaluate Each Assertion + +For each expectation: + +1. **Search for evidence** in the transcript and outputs +2. 
**Determine verdict**: + - **PASS**: Clear evidence the expectation is true AND the evidence reflects genuine task completion, not just surface-level compliance + - **FAIL**: No evidence, or evidence contradicts the expectation, or the evidence is superficial (e.g., correct filename but empty/wrong content) +3. **Cite the evidence**: Quote the specific text or describe what you found + +### Step 4: Extract and Verify Claims + +Beyond the predefined expectations, extract implicit claims from the outputs and verify them: + +1. **Extract claims** from the transcript and outputs: + - Factual statements ("The form has 12 fields") + - Process claims ("Used pypdf to fill the form") + - Quality claims ("All fields were filled correctly") + +2. **Verify each claim**: + - **Factual claims**: Can be checked against the outputs or external sources + - **Process claims**: Can be verified from the transcript + - **Quality claims**: Evaluate whether the claim is justified + +3. **Flag unverifiable claims**: Note claims that cannot be verified with available information + +This catches issues that predefined expectations might miss. + +### Step 5: Read User Notes + +If `{outputs_dir}/user_notes.md` exists: +1. Read it and note any uncertainties or issues flagged by the executor +2. Include relevant concerns in the grading output +3. These may reveal problems even when expectations pass + +### Step 6: Critique the Evals + +After grading, consider whether the evals themselves could be improved. Only surface suggestions when there's a clear gap. + +Good suggestions test meaningful outcomes — assertions that are hard to satisfy without actually doing the work correctly. Think about what makes an assertion *discriminating*: it passes when the skill genuinely succeeds and fails when it doesn't. 
+ +Suggestions worth raising: +- An assertion that passed but would also pass for a clearly wrong output (e.g., checking filename existence but not file content) +- An important outcome you observed — good or bad — that no assertion covers at all +- An assertion that can't actually be verified from the available outputs + +Keep the bar high. The goal is to flag things the eval author would say "good catch" about, not to nitpick every assertion. + +### Step 7: Write Grading Results + +Save results to `{outputs_dir}/../grading.json` (sibling to outputs_dir). + +## Grading Criteria + +**PASS when**: +- The transcript or outputs clearly demonstrate the expectation is true +- Specific evidence can be cited +- The evidence reflects genuine substance, not just surface compliance (e.g., a file exists AND contains correct content, not just the right filename) + +**FAIL when**: +- No evidence found for the expectation +- Evidence contradicts the expectation +- The expectation cannot be verified from available information +- The evidence is superficial — the assertion is technically satisfied but the underlying task outcome is wrong or incomplete +- The output appears to meet the assertion by coincidence rather than by actually doing the work + +**When uncertain**: The burden of proof to pass is on the expectation. + +### Step 8: Read Executor Metrics and Timing + +1. If `{outputs_dir}/metrics.json` exists, read it and include in grading output +2. If `{outputs_dir}/../timing.json` exists, read it and include timing data + +## Output Format + +Write a JSON file with this structure: + +```json +{ + "expectations": [ + { + "text": "The output includes the name 'John Smith'", + "passed": true, + "evidence": "Found in transcript Step 3: 'Extracted names: John Smith, Sarah Johnson'" + }, + { + "text": "The spreadsheet has a SUM formula in cell B10", + "passed": false, + "evidence": "No spreadsheet was created. The output was a text file." 
+ }, + { + "text": "The assistant used the skill's OCR script", + "passed": true, + "evidence": "Transcript Step 2 shows: 'Tool: Bash - python ocr_script.py image.png'" + } + ], + "summary": { + "passed": 2, + "failed": 1, + "total": 3, + "pass_rate": 0.67 + }, + "execution_metrics": { + "tool_calls": { + "Read": 5, + "Write": 2, + "Bash": 8 + }, + "total_tool_calls": 15, + "total_steps": 6, + "errors_encountered": 0, + "output_chars": 12450, + "transcript_chars": 3200 + }, + "timing": { + "executor_duration_seconds": 165.0, + "grader_duration_seconds": 26.0, + "total_duration_seconds": 191.0 + }, + "claims": [ + { + "claim": "The form has 12 fillable fields", + "type": "factual", + "verified": true, + "evidence": "Counted 12 fields in field_info.json" + }, + { + "claim": "All required fields were populated", + "type": "quality", + "verified": false, + "evidence": "Reference section was left blank despite data being available" + } + ], + "user_notes_summary": { + "uncertainties": ["Used 2023 data, may be stale"], + "needs_review": [], + "workarounds": ["Fell back to text overlay for non-fillable fields"] + }, + "eval_feedback": { + "suggestions": [ + { + "assertion": "The output includes the name 'John Smith'", + "reason": "A hallucinated document that mentions the name would also pass — consider checking it appears as the primary contact with matching phone and email from the input" + }, + { + "reason": "No assertion checks whether the extracted phone numbers match the input — I observed incorrect numbers in the output that went uncaught" + } + ], + "overall": "Assertions check presence but not correctness. Consider adding content verification." 
+ } +} +``` + +## Field Descriptions + +- **expectations**: Array of graded expectations + - **text**: The original expectation text + - **passed**: Boolean - true if expectation passes + - **evidence**: Specific quote or description supporting the verdict +- **summary**: Aggregate statistics + - **passed**: Count of passed expectations + - **failed**: Count of failed expectations + - **total**: Total expectations evaluated + - **pass_rate**: Fraction passed (0.0 to 1.0) +- **execution_metrics**: Copied from executor's metrics.json (if available) + - **output_chars**: Total character count of output files (proxy for tokens) + - **transcript_chars**: Character count of transcript +- **timing**: Wall clock timing from timing.json (if available) + - **executor_duration_seconds**: Time spent in executor subagent + - **total_duration_seconds**: Total elapsed time for the run +- **claims**: Extracted and verified claims from the output + - **claim**: The statement being verified + - **type**: "factual", "process", or "quality" + - **verified**: Boolean - whether the claim holds + - **evidence**: Supporting or contradicting evidence +- **user_notes_summary**: Issues flagged by the executor + - **uncertainties**: Things the executor wasn't sure about + - **needs_review**: Items requiring human attention + - **workarounds**: Places where the skill didn't work as expected +- **eval_feedback**: Improvement suggestions for the evals (only when warranted) + - **suggestions**: List of concrete suggestions, each with a `reason` and optionally an `assertion` it relates to + - **overall**: Brief assessment — can be "No suggestions, evals look solid" if nothing to flag + +## Guidelines + +- **Be objective**: Base verdicts on evidence, not assumptions +- **Be specific**: Quote the exact text that supports your verdict +- **Be thorough**: Check both transcript and output files +- **Be consistent**: Apply the same standard to each expectation +- **Explain failures**: Make it clear why 
evidence was insufficient +- **No partial credit**: Each expectation is pass or fail, not partial diff --git a/devcon/.agents/skills/skill-creator/assets/eval_review.html b/devcon/.agents/skills/skill-creator/assets/eval_review.html new file mode 100644 index 000000000..938ff32ae --- /dev/null +++ b/devcon/.agents/skills/skill-creator/assets/eval_review.html @@ -0,0 +1,146 @@ + + + + + + Eval Set Review - __SKILL_NAME_PLACEHOLDER__ + + + + + + +

Eval Set Review: __SKILL_NAME_PLACEHOLDER__

+

Current description: __SKILL_DESCRIPTION_PLACEHOLDER__

+ +
+ + +
+ + + + + + + + + + +
QueryShould TriggerActions
+ +

+ + + + diff --git a/devcon/.agents/skills/skill-creator/eval-viewer/generate_review.py b/devcon/.agents/skills/skill-creator/eval-viewer/generate_review.py new file mode 100644 index 000000000..7fa597863 --- /dev/null +++ b/devcon/.agents/skills/skill-creator/eval-viewer/generate_review.py @@ -0,0 +1,471 @@ +#!/usr/bin/env python3 +"""Generate and serve a review page for eval results. + +Reads the workspace directory, discovers runs (directories with outputs/), +embeds all output data into a self-contained HTML page, and serves it via +a tiny HTTP server. Feedback auto-saves to feedback.json in the workspace. + +Usage: + python generate_review.py [--port PORT] [--skill-name NAME] + python generate_review.py --previous-feedback /path/to/old/feedback.json + +No dependencies beyond the Python stdlib are required. +""" + +import argparse +import base64 +import json +import mimetypes +import os +import re +import signal +import subprocess +import sys +import time +import webbrowser +from functools import partial +from http.server import HTTPServer, BaseHTTPRequestHandler +from pathlib import Path + +# Files to exclude from output listings +METADATA_FILES = {"transcript.md", "user_notes.md", "metrics.json"} + +# Extensions we render as inline text +TEXT_EXTENSIONS = { + ".txt", ".md", ".json", ".csv", ".py", ".js", ".ts", ".tsx", ".jsx", + ".yaml", ".yml", ".xml", ".html", ".css", ".sh", ".rb", ".go", ".rs", + ".java", ".c", ".cpp", ".h", ".hpp", ".sql", ".r", ".toml", +} + +# Extensions we render as inline images +IMAGE_EXTENSIONS = {".png", ".jpg", ".jpeg", ".gif", ".svg", ".webp"} + +# MIME type overrides for common types +MIME_OVERRIDES = { + ".svg": "image/svg+xml", + ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", +} + + +def get_mime_type(path: Path) -> str: + 
ext = path.suffix.lower() + if ext in MIME_OVERRIDES: + return MIME_OVERRIDES[ext] + mime, _ = mimetypes.guess_type(str(path)) + return mime or "application/octet-stream" + + +def find_runs(workspace: Path) -> list[dict]: + """Recursively find directories that contain an outputs/ subdirectory.""" + runs: list[dict] = [] + _find_runs_recursive(workspace, workspace, runs) + runs.sort(key=lambda r: (r.get("eval_id", float("inf")), r["id"])) + return runs + + +def _find_runs_recursive(root: Path, current: Path, runs: list[dict]) -> None: + if not current.is_dir(): + return + + outputs_dir = current / "outputs" + if outputs_dir.is_dir(): + run = build_run(root, current) + if run: + runs.append(run) + return + + skip = {"node_modules", ".git", "__pycache__", "skill", "inputs"} + for child in sorted(current.iterdir()): + if child.is_dir() and child.name not in skip: + _find_runs_recursive(root, child, runs) + + +def build_run(root: Path, run_dir: Path) -> dict | None: + """Build a run dict with prompt, outputs, and grading data.""" + prompt = "" + eval_id = None + + # Try eval_metadata.json + for candidate in [run_dir / "eval_metadata.json", run_dir.parent / "eval_metadata.json"]: + if candidate.exists(): + try: + metadata = json.loads(candidate.read_text()) + prompt = metadata.get("prompt", "") + eval_id = metadata.get("eval_id") + except (json.JSONDecodeError, OSError): + pass + if prompt: + break + + # Fall back to transcript.md + if not prompt: + for candidate in [run_dir / "transcript.md", run_dir / "outputs" / "transcript.md"]: + if candidate.exists(): + try: + text = candidate.read_text() + match = re.search(r"## Eval Prompt\n\n([\s\S]*?)(?=\n##|$)", text) + if match: + prompt = match.group(1).strip() + except OSError: + pass + if prompt: + break + + if not prompt: + prompt = "(No prompt found)" + + run_id = str(run_dir.relative_to(root)).replace("/", "-").replace("\\", "-") + + # Collect output files + outputs_dir = run_dir / "outputs" + output_files: list[dict] = 
[] + if outputs_dir.is_dir(): + for f in sorted(outputs_dir.iterdir()): + if f.is_file() and f.name not in METADATA_FILES: + output_files.append(embed_file(f)) + + # Load grading if present + grading = None + for candidate in [run_dir / "grading.json", run_dir.parent / "grading.json"]: + if candidate.exists(): + try: + grading = json.loads(candidate.read_text()) + except (json.JSONDecodeError, OSError): + pass + if grading: + break + + return { + "id": run_id, + "prompt": prompt, + "eval_id": eval_id, + "outputs": output_files, + "grading": grading, + } + + +def embed_file(path: Path) -> dict: + """Read a file and return an embedded representation.""" + ext = path.suffix.lower() + mime = get_mime_type(path) + + if ext in TEXT_EXTENSIONS: + try: + content = path.read_text(errors="replace") + except OSError: + content = "(Error reading file)" + return { + "name": path.name, + "type": "text", + "content": content, + } + elif ext in IMAGE_EXTENSIONS: + try: + raw = path.read_bytes() + b64 = base64.b64encode(raw).decode("ascii") + except OSError: + return {"name": path.name, "type": "error", "content": "(Error reading file)"} + return { + "name": path.name, + "type": "image", + "mime": mime, + "data_uri": f"data:{mime};base64,{b64}", + } + elif ext == ".pdf": + try: + raw = path.read_bytes() + b64 = base64.b64encode(raw).decode("ascii") + except OSError: + return {"name": path.name, "type": "error", "content": "(Error reading file)"} + return { + "name": path.name, + "type": "pdf", + "data_uri": f"data:{mime};base64,{b64}", + } + elif ext == ".xlsx": + try: + raw = path.read_bytes() + b64 = base64.b64encode(raw).decode("ascii") + except OSError: + return {"name": path.name, "type": "error", "content": "(Error reading file)"} + return { + "name": path.name, + "type": "xlsx", + "data_b64": b64, + } + else: + # Binary / unknown — base64 download link + try: + raw = path.read_bytes() + b64 = base64.b64encode(raw).decode("ascii") + except OSError: + return {"name": 
path.name, "type": "error", "content": "(Error reading file)"} + return { + "name": path.name, + "type": "binary", + "mime": mime, + "data_uri": f"data:{mime};base64,{b64}", + } + + +def load_previous_iteration(workspace: Path) -> dict[str, dict]: + """Load previous iteration's feedback and outputs. + + Returns a map of run_id -> {"feedback": str, "outputs": list[dict]}. + """ + result: dict[str, dict] = {} + + # Load feedback + feedback_map: dict[str, str] = {} + feedback_path = workspace / "feedback.json" + if feedback_path.exists(): + try: + data = json.loads(feedback_path.read_text()) + feedback_map = { + r["run_id"]: r["feedback"] + for r in data.get("reviews", []) + if r.get("feedback", "").strip() + } + except (json.JSONDecodeError, OSError, KeyError): + pass + + # Load runs (to get outputs) + prev_runs = find_runs(workspace) + for run in prev_runs: + result[run["id"]] = { + "feedback": feedback_map.get(run["id"], ""), + "outputs": run.get("outputs", []), + } + + # Also add feedback for run_ids that had feedback but no matching run + for run_id, fb in feedback_map.items(): + if run_id not in result: + result[run_id] = {"feedback": fb, "outputs": []} + + return result + + +def generate_html( + runs: list[dict], + skill_name: str, + previous: dict[str, dict] | None = None, + benchmark: dict | None = None, +) -> str: + """Generate the complete standalone HTML page with embedded data.""" + template_path = Path(__file__).parent / "viewer.html" + template = template_path.read_text() + + # Build previous_feedback and previous_outputs maps for the template + previous_feedback: dict[str, str] = {} + previous_outputs: dict[str, list[dict]] = {} + if previous: + for run_id, data in previous.items(): + if data.get("feedback"): + previous_feedback[run_id] = data["feedback"] + if data.get("outputs"): + previous_outputs[run_id] = data["outputs"] + + embedded = { + "skill_name": skill_name, + "runs": runs, + "previous_feedback": previous_feedback, + "previous_outputs": 
previous_outputs, + } + if benchmark: + embedded["benchmark"] = benchmark + + data_json = json.dumps(embedded) + + return template.replace("/*__EMBEDDED_DATA__*/", f"const EMBEDDED_DATA = {data_json};") + + +# --------------------------------------------------------------------------- +# HTTP server (stdlib only, zero dependencies) +# --------------------------------------------------------------------------- + +def _kill_port(port: int) -> None: + """Kill any process listening on the given port.""" + try: + result = subprocess.run( + ["lsof", "-ti", f":{port}"], + capture_output=True, text=True, timeout=5, + ) + for pid_str in result.stdout.strip().split("\n"): + if pid_str.strip(): + try: + os.kill(int(pid_str.strip()), signal.SIGTERM) + except (ProcessLookupError, ValueError): + pass + if result.stdout.strip(): + time.sleep(0.5) + except subprocess.TimeoutExpired: + pass + except FileNotFoundError: + print("Note: lsof not found, cannot check if port is in use", file=sys.stderr) + +class ReviewHandler(BaseHTTPRequestHandler): + """Serves the review HTML and handles feedback saves. + + Regenerates the HTML on each page load so that refreshing the browser + picks up new eval outputs without restarting the server. 
+ """ + + def __init__( + self, + workspace: Path, + skill_name: str, + feedback_path: Path, + previous: dict[str, dict], + benchmark_path: Path | None, + *args, + **kwargs, + ): + self.workspace = workspace + self.skill_name = skill_name + self.feedback_path = feedback_path + self.previous = previous + self.benchmark_path = benchmark_path + super().__init__(*args, **kwargs) + + def do_GET(self) -> None: + if self.path == "/" or self.path == "/index.html": + # Regenerate HTML on each request (re-scans workspace for new outputs) + runs = find_runs(self.workspace) + benchmark = None + if self.benchmark_path and self.benchmark_path.exists(): + try: + benchmark = json.loads(self.benchmark_path.read_text()) + except (json.JSONDecodeError, OSError): + pass + html = generate_html(runs, self.skill_name, self.previous, benchmark) + content = html.encode("utf-8") + self.send_response(200) + self.send_header("Content-Type", "text/html; charset=utf-8") + self.send_header("Content-Length", str(len(content))) + self.end_headers() + self.wfile.write(content) + elif self.path == "/api/feedback": + data = b"{}" + if self.feedback_path.exists(): + data = self.feedback_path.read_bytes() + self.send_response(200) + self.send_header("Content-Type", "application/json") + self.send_header("Content-Length", str(len(data))) + self.end_headers() + self.wfile.write(data) + else: + self.send_error(404) + + def do_POST(self) -> None: + if self.path == "/api/feedback": + length = int(self.headers.get("Content-Length", 0)) + body = self.rfile.read(length) + try: + data = json.loads(body) + if not isinstance(data, dict) or "reviews" not in data: + raise ValueError("Expected JSON object with 'reviews' key") + self.feedback_path.write_text(json.dumps(data, indent=2) + "\n") + resp = b'{"ok":true}' + self.send_response(200) + except (json.JSONDecodeError, OSError, ValueError) as e: + resp = json.dumps({"error": str(e)}).encode() + self.send_response(500) + self.send_header("Content-Type", 
"application/json") + self.send_header("Content-Length", str(len(resp))) + self.end_headers() + self.wfile.write(resp) + else: + self.send_error(404) + + def log_message(self, format: str, *args: object) -> None: + # Suppress request logging to keep terminal clean + pass + + +def main() -> None: + parser = argparse.ArgumentParser(description="Generate and serve eval review") + parser.add_argument("workspace", type=Path, help="Path to workspace directory") + parser.add_argument("--port", "-p", type=int, default=3117, help="Server port (default: 3117)") + parser.add_argument("--skill-name", "-n", type=str, default=None, help="Skill name for header") + parser.add_argument( + "--previous-workspace", type=Path, default=None, + help="Path to previous iteration's workspace (shows old outputs and feedback as context)", + ) + parser.add_argument( + "--benchmark", type=Path, default=None, + help="Path to benchmark.json to show in the Benchmark tab", + ) + parser.add_argument( + "--static", "-s", type=Path, default=None, + help="Write standalone HTML to this path instead of starting a server", + ) + args = parser.parse_args() + + workspace = args.workspace.resolve() + if not workspace.is_dir(): + print(f"Error: {workspace} is not a directory", file=sys.stderr) + sys.exit(1) + + runs = find_runs(workspace) + if not runs: + print(f"No runs found in {workspace}", file=sys.stderr) + sys.exit(1) + + skill_name = args.skill_name or workspace.name.replace("-workspace", "") + feedback_path = workspace / "feedback.json" + + previous: dict[str, dict] = {} + if args.previous_workspace: + previous = load_previous_iteration(args.previous_workspace.resolve()) + + benchmark_path = args.benchmark.resolve() if args.benchmark else None + benchmark = None + if benchmark_path and benchmark_path.exists(): + try: + benchmark = json.loads(benchmark_path.read_text()) + except (json.JSONDecodeError, OSError): + pass + + if args.static: + html = generate_html(runs, skill_name, previous, benchmark) + 
args.static.parent.mkdir(parents=True, exist_ok=True) + args.static.write_text(html) + print(f"\n Static viewer written to: {args.static}\n") + sys.exit(0) + + # Kill any existing process on the target port + port = args.port + _kill_port(port) + handler = partial(ReviewHandler, workspace, skill_name, feedback_path, previous, benchmark_path) + try: + server = HTTPServer(("127.0.0.1", port), handler) + except OSError: + # Port still in use after kill attempt — find a free one + server = HTTPServer(("127.0.0.1", 0), handler) + port = server.server_address[1] + + url = f"http://localhost:{port}" + print(f"\n Eval Viewer") + print(f" ─────────────────────────────────") + print(f" URL: {url}") + print(f" Workspace: {workspace}") + print(f" Feedback: {feedback_path}") + if previous: + print(f" Previous: {args.previous_workspace} ({len(previous)} runs)") + if benchmark_path: + print(f" Benchmark: {benchmark_path}") + print(f"\n Press Ctrl+C to stop.\n") + + webbrowser.open(url) + + try: + server.serve_forever() + except KeyboardInterrupt: + print("\nStopped.") + server.server_close() + + +if __name__ == "__main__": + main() diff --git a/devcon/.agents/skills/skill-creator/eval-viewer/viewer.html b/devcon/.agents/skills/skill-creator/eval-viewer/viewer.html new file mode 100644 index 000000000..6d8e96348 --- /dev/null +++ b/devcon/.agents/skills/skill-creator/eval-viewer/viewer.html @@ -0,0 +1,1325 @@ + + + + + + Eval Review + + + + + + + +
+
+
+

Eval Review:

+
Review each output and leave feedback below. Navigate with arrow keys or buttons. When done, copy feedback and paste into Claude Code.
+
+
+
+ + + + + +
+
+ +
+
Prompt
+
+
+
+
+ + +
+
Output
+
+
No output files found
+
+
+ + + + + + + + +
+
Your Feedback
+
+ + + +
+
+
+ + +
+ + +
+
+
No benchmark data available. Run a benchmark to see quantitative results here.
+
+
+
+ + +
+
+

Review Complete

+

Your feedback has been saved. Go back to your Claude Code session and tell Claude you're done reviewing.

+
+ +
+
+
+ + +
+ + + + diff --git a/devcon/.agents/skills/skill-creator/references/schemas.md b/devcon/.agents/skills/skill-creator/references/schemas.md new file mode 100644 index 000000000..b6eeaa2d4 --- /dev/null +++ b/devcon/.agents/skills/skill-creator/references/schemas.md @@ -0,0 +1,430 @@ +# JSON Schemas + +This document defines the JSON schemas used by skill-creator. + +--- + +## evals.json + +Defines the evals for a skill. Located at `evals/evals.json` within the skill directory. + +```json +{ + "skill_name": "example-skill", + "evals": [ + { + "id": 1, + "prompt": "User's example prompt", + "expected_output": "Description of expected result", + "files": ["evals/files/sample1.pdf"], + "expectations": [ + "The output includes X", + "The skill used script Y" + ] + } + ] +} +``` + +**Fields:** +- `skill_name`: Name matching the skill's frontmatter +- `evals[].id`: Unique integer identifier +- `evals[].prompt`: The task to execute +- `evals[].expected_output`: Human-readable description of success +- `evals[].files`: Optional list of input file paths (relative to skill root) +- `evals[].expectations`: List of verifiable statements + +--- + +## history.json + +Tracks version progression in Improve mode. Located at workspace root. 
+ +```json +{ + "started_at": "2026-01-15T10:30:00Z", + "skill_name": "pdf", + "current_best": "v2", + "iterations": [ + { + "version": "v0", + "parent": null, + "expectation_pass_rate": 0.65, + "grading_result": "baseline", + "is_current_best": false + }, + { + "version": "v1", + "parent": "v0", + "expectation_pass_rate": 0.75, + "grading_result": "won", + "is_current_best": false + }, + { + "version": "v2", + "parent": "v1", + "expectation_pass_rate": 0.85, + "grading_result": "won", + "is_current_best": true + } + ] +} +``` + +**Fields:** +- `started_at`: ISO timestamp of when improvement started +- `skill_name`: Name of the skill being improved +- `current_best`: Version identifier of the best performer +- `iterations[].version`: Version identifier (v0, v1, ...) +- `iterations[].parent`: Parent version this was derived from +- `iterations[].expectation_pass_rate`: Pass rate from grading +- `iterations[].grading_result`: "baseline", "won", "lost", or "tie" +- `iterations[].is_current_best`: Whether this is the current best version + +--- + +## grading.json + +Output from the grader agent. Located at `/grading.json`. + +```json +{ + "expectations": [ + { + "text": "The output includes the name 'John Smith'", + "passed": true, + "evidence": "Found in transcript Step 3: 'Extracted names: John Smith, Sarah Johnson'" + }, + { + "text": "The spreadsheet has a SUM formula in cell B10", + "passed": false, + "evidence": "No spreadsheet was created. The output was a text file." 
+ } + ], + "summary": { + "passed": 2, + "failed": 1, + "total": 3, + "pass_rate": 0.67 + }, + "execution_metrics": { + "tool_calls": { + "Read": 5, + "Write": 2, + "Bash": 8 + }, + "total_tool_calls": 15, + "total_steps": 6, + "errors_encountered": 0, + "output_chars": 12450, + "transcript_chars": 3200 + }, + "timing": { + "executor_duration_seconds": 165.0, + "grader_duration_seconds": 26.0, + "total_duration_seconds": 191.0 + }, + "claims": [ + { + "claim": "The form has 12 fillable fields", + "type": "factual", + "verified": true, + "evidence": "Counted 12 fields in field_info.json" + } + ], + "user_notes_summary": { + "uncertainties": ["Used 2023 data, may be stale"], + "needs_review": [], + "workarounds": ["Fell back to text overlay for non-fillable fields"] + }, + "eval_feedback": { + "suggestions": [ + { + "assertion": "The output includes the name 'John Smith'", + "reason": "A hallucinated document that mentions the name would also pass" + } + ], + "overall": "Assertions check presence but not correctness." + } +} +``` + +**Fields:** +- `expectations[]`: Graded expectations with evidence +- `summary`: Aggregate pass/fail counts +- `execution_metrics`: Tool usage and output size (from executor's metrics.json) +- `timing`: Wall clock timing (from timing.json) +- `claims`: Extracted and verified claims from the output +- `user_notes_summary`: Issues flagged by the executor +- `eval_feedback`: (optional) Improvement suggestions for the evals, only present when the grader identifies issues worth raising + +--- + +## metrics.json + +Output from the executor agent. Located at `/outputs/metrics.json`. 
+ +```json +{ + "tool_calls": { + "Read": 5, + "Write": 2, + "Bash": 8, + "Edit": 1, + "Glob": 2, + "Grep": 0 + }, + "total_tool_calls": 18, + "total_steps": 6, + "files_created": ["filled_form.pdf", "field_values.json"], + "errors_encountered": 0, + "output_chars": 12450, + "transcript_chars": 3200 +} +``` + +**Fields:** +- `tool_calls`: Count per tool type +- `total_tool_calls`: Sum of all tool calls +- `total_steps`: Number of major execution steps +- `files_created`: List of output files created +- `errors_encountered`: Number of errors during execution +- `output_chars`: Total character count of output files +- `transcript_chars`: Character count of transcript + +--- + +## timing.json + +Wall clock timing for a run. Located at `/timing.json`. + +**How to capture:** When a subagent task completes, the task notification includes `total_tokens` and `duration_ms`. Save these immediately — they are not persisted anywhere else and cannot be recovered after the fact. + +```json +{ + "total_tokens": 84852, + "duration_ms": 23332, + "total_duration_seconds": 23.3, + "executor_start": "2026-01-15T10:30:00Z", + "executor_end": "2026-01-15T10:32:45Z", + "executor_duration_seconds": 165.0, + "grader_start": "2026-01-15T10:32:46Z", + "grader_end": "2026-01-15T10:33:12Z", + "grader_duration_seconds": 26.0 +} +``` + +--- + +## benchmark.json + +Output from Benchmark mode. Located at `benchmarks//benchmark.json`. 
+ +```json +{ + "metadata": { + "skill_name": "pdf", + "skill_path": "/path/to/pdf", + "executor_model": "claude-sonnet-4-20250514", + "analyzer_model": "most-capable-model", + "timestamp": "2026-01-15T10:30:00Z", + "evals_run": [1, 2, 3], + "runs_per_configuration": 3 + }, + + "runs": [ + { + "eval_id": 1, + "eval_name": "Ocean", + "configuration": "with_skill", + "run_number": 1, + "result": { + "pass_rate": 0.85, + "passed": 6, + "failed": 1, + "total": 7, + "time_seconds": 42.5, + "tokens": 3800, + "tool_calls": 18, + "errors": 0 + }, + "expectations": [ + {"text": "...", "passed": true, "evidence": "..."} + ], + "notes": [ + "Used 2023 data, may be stale", + "Fell back to text overlay for non-fillable fields" + ] + } + ], + + "run_summary": { + "with_skill": { + "pass_rate": {"mean": 0.85, "stddev": 0.05, "min": 0.80, "max": 0.90}, + "time_seconds": {"mean": 45.0, "stddev": 12.0, "min": 32.0, "max": 58.0}, + "tokens": {"mean": 3800, "stddev": 400, "min": 3200, "max": 4100} + }, + "without_skill": { + "pass_rate": {"mean": 0.35, "stddev": 0.08, "min": 0.28, "max": 0.45}, + "time_seconds": {"mean": 32.0, "stddev": 8.0, "min": 24.0, "max": 42.0}, + "tokens": {"mean": 2100, "stddev": 300, "min": 1800, "max": 2500} + }, + "delta": { + "pass_rate": "+0.50", + "time_seconds": "+13.0", + "tokens": "+1700" + } + }, + + "notes": [ + "Assertion 'Output is a PDF file' passes 100% in both configurations - may not differentiate skill value", + "Eval 3 shows high variance (50% ± 40%) - may be flaky or model-dependent", + "Without-skill runs consistently fail on table extraction expectations", + "Skill adds 13s average execution time but improves pass rate by 50%" + ] +} +``` + +**Fields:** +- `metadata`: Information about the benchmark run + - `skill_name`: Name of the skill + - `timestamp`: When the benchmark was run + - `evals_run`: List of eval names or IDs + - `runs_per_configuration`: Number of runs per config (e.g. 
3) +- `runs[]`: Individual run results + - `eval_id`: Numeric eval identifier + - `eval_name`: Human-readable eval name (used as section header in the viewer) + - `configuration`: Must be `"with_skill"` or `"without_skill"` (the viewer uses this exact string for grouping and color coding) + - `run_number`: Integer run number (1, 2, 3...) + - `result`: Nested object with `pass_rate`, `passed`, `total`, `time_seconds`, `tokens`, `errors` +- `run_summary`: Statistical aggregates per configuration + - `with_skill` / `without_skill`: Each contains `pass_rate`, `time_seconds`, `tokens` objects with `mean` and `stddev` fields + - `delta`: Difference strings like `"+0.50"`, `"+13.0"`, `"+1700"` +- `notes`: Freeform observations from the analyzer + +**Important:** The viewer reads these field names exactly. Using `config` instead of `configuration`, or putting `pass_rate` at the top level of a run instead of nested under `result`, will cause the viewer to show empty/zero values. Always reference this schema when generating benchmark.json manually. + +--- + +## comparison.json + +Output from blind comparator. Located at `/comparison-N.json`. + +```json +{ + "winner": "A", + "reasoning": "Output A provides a complete solution with proper formatting and all required fields. 
Output B is missing the date field and has formatting inconsistencies.", + "rubric": { + "A": { + "content": { + "correctness": 5, + "completeness": 5, + "accuracy": 4 + }, + "structure": { + "organization": 4, + "formatting": 5, + "usability": 4 + }, + "content_score": 4.7, + "structure_score": 4.3, + "overall_score": 9.0 + }, + "B": { + "content": { + "correctness": 3, + "completeness": 2, + "accuracy": 3 + }, + "structure": { + "organization": 3, + "formatting": 2, + "usability": 3 + }, + "content_score": 2.7, + "structure_score": 2.7, + "overall_score": 5.4 + } + }, + "output_quality": { + "A": { + "score": 9, + "strengths": ["Complete solution", "Well-formatted", "All fields present"], + "weaknesses": ["Minor style inconsistency in header"] + }, + "B": { + "score": 5, + "strengths": ["Readable output", "Correct basic structure"], + "weaknesses": ["Missing date field", "Formatting inconsistencies", "Partial data extraction"] + } + }, + "expectation_results": { + "A": { + "passed": 4, + "total": 5, + "pass_rate": 0.80, + "details": [ + {"text": "Output includes name", "passed": true} + ] + }, + "B": { + "passed": 3, + "total": 5, + "pass_rate": 0.60, + "details": [ + {"text": "Output includes name", "passed": true} + ] + } + } +} +``` + +--- + +## analysis.json + +Output from post-hoc analyzer. Located at `/analysis.json`. 
+ +```json +{ + "comparison_summary": { + "winner": "A", + "winner_skill": "path/to/winner/skill", + "loser_skill": "path/to/loser/skill", + "comparator_reasoning": "Brief summary of why comparator chose winner" + }, + "winner_strengths": [ + "Clear step-by-step instructions for handling multi-page documents", + "Included validation script that caught formatting errors" + ], + "loser_weaknesses": [ + "Vague instruction 'process the document appropriately' led to inconsistent behavior", + "No script for validation, agent had to improvise" + ], + "instruction_following": { + "winner": { + "score": 9, + "issues": ["Minor: skipped optional logging step"] + }, + "loser": { + "score": 6, + "issues": [ + "Did not use the skill's formatting template", + "Invented own approach instead of following step 3" + ] + } + }, + "improvement_suggestions": [ + { + "priority": "high", + "category": "instructions", + "suggestion": "Replace 'process the document appropriately' with explicit steps", + "expected_impact": "Would eliminate ambiguity that caused inconsistent behavior" + } + ], + "transcript_insights": { + "winner_execution_pattern": "Read skill -> Followed 5-step process -> Used validation script", + "loser_execution_pattern": "Read skill -> Unclear on approach -> Tried 3 different methods" + } +} +``` diff --git a/devcon/.agents/skills/skill-creator/scripts/aggregate_benchmark.py b/devcon/.agents/skills/skill-creator/scripts/aggregate_benchmark.py new file mode 100755 index 000000000..3e66e8c10 --- /dev/null +++ b/devcon/.agents/skills/skill-creator/scripts/aggregate_benchmark.py @@ -0,0 +1,401 @@ +#!/usr/bin/env python3 +""" +Aggregate individual run results into benchmark summary statistics. 
+ +Reads grading.json files from run directories and produces: +- run_summary with mean, stddev, min, max for each metric +- delta between with_skill and without_skill configurations + +Usage: + python aggregate_benchmark.py + +Example: + python aggregate_benchmark.py benchmarks/2026-01-15T10-30-00/ + +The script supports two directory layouts: + + Workspace layout (from skill-creator iterations): + / + └── eval-N/ + ├── with_skill/ + │ ├── run-1/grading.json + │ └── run-2/grading.json + └── without_skill/ + ├── run-1/grading.json + └── run-2/grading.json + + Legacy layout (with runs/ subdirectory): + / + └── runs/ + └── eval-N/ + ├── with_skill/ + │ └── run-1/grading.json + └── without_skill/ + └── run-1/grading.json +""" + +import argparse +import json +import math +import sys +from datetime import datetime, timezone +from pathlib import Path + + +def calculate_stats(values: list[float]) -> dict: + """Calculate mean, stddev, min, max for a list of values.""" + if not values: + return {"mean": 0.0, "stddev": 0.0, "min": 0.0, "max": 0.0} + + n = len(values) + mean = sum(values) / n + + if n > 1: + variance = sum((x - mean) ** 2 for x in values) / (n - 1) + stddev = math.sqrt(variance) + else: + stddev = 0.0 + + return { + "mean": round(mean, 4), + "stddev": round(stddev, 4), + "min": round(min(values), 4), + "max": round(max(values), 4) + } + + +def load_run_results(benchmark_dir: Path) -> dict: + """ + Load all run results from a benchmark directory. + + Returns dict keyed by config name (e.g. "with_skill"/"without_skill", + or "new_skill"/"old_skill"), each containing a list of run results. 
+ """ + # Support both layouts: eval dirs directly under benchmark_dir, or under runs/ + runs_dir = benchmark_dir / "runs" + if runs_dir.exists(): + search_dir = runs_dir + elif list(benchmark_dir.glob("eval-*")): + search_dir = benchmark_dir + else: + print(f"No eval directories found in {benchmark_dir} or {benchmark_dir / 'runs'}") + return {} + + results: dict[str, list] = {} + + for eval_idx, eval_dir in enumerate(sorted(search_dir.glob("eval-*"))): + metadata_path = eval_dir / "eval_metadata.json" + if metadata_path.exists(): + try: + with open(metadata_path) as mf: + eval_id = json.load(mf).get("eval_id", eval_idx) + except (json.JSONDecodeError, OSError): + eval_id = eval_idx + else: + try: + eval_id = int(eval_dir.name.split("-")[1]) + except ValueError: + eval_id = eval_idx + + # Discover config directories dynamically rather than hardcoding names + for config_dir in sorted(eval_dir.iterdir()): + if not config_dir.is_dir(): + continue + # Skip non-config directories (inputs, outputs, etc.) 
+ if not list(config_dir.glob("run-*")): + continue + config = config_dir.name + if config not in results: + results[config] = [] + + for run_dir in sorted(config_dir.glob("run-*")): + run_number = int(run_dir.name.split("-")[1]) + grading_file = run_dir / "grading.json" + + if not grading_file.exists(): + print(f"Warning: grading.json not found in {run_dir}") + continue + + try: + with open(grading_file) as f: + grading = json.load(f) + except json.JSONDecodeError as e: + print(f"Warning: Invalid JSON in {grading_file}: {e}") + continue + + # Extract metrics + result = { + "eval_id": eval_id, + "run_number": run_number, + "pass_rate": grading.get("summary", {}).get("pass_rate", 0.0), + "passed": grading.get("summary", {}).get("passed", 0), + "failed": grading.get("summary", {}).get("failed", 0), + "total": grading.get("summary", {}).get("total", 0), + } + + # Extract timing — check grading.json first, then sibling timing.json + timing = grading.get("timing", {}) + result["time_seconds"] = timing.get("total_duration_seconds", 0.0) + timing_file = run_dir / "timing.json" + if result["time_seconds"] == 0.0 and timing_file.exists(): + try: + with open(timing_file) as tf: + timing_data = json.load(tf) + result["time_seconds"] = timing_data.get("total_duration_seconds", 0.0) + result["tokens"] = timing_data.get("total_tokens", 0) + except json.JSONDecodeError: + pass + + # Extract metrics if available + metrics = grading.get("execution_metrics", {}) + result["tool_calls"] = metrics.get("total_tool_calls", 0) + if not result.get("tokens"): + result["tokens"] = metrics.get("output_chars", 0) + result["errors"] = metrics.get("errors_encountered", 0) + + # Extract expectations — viewer requires fields: text, passed, evidence + raw_expectations = grading.get("expectations", []) + for exp in raw_expectations: + if "text" not in exp or "passed" not in exp: + print(f"Warning: expectation in {grading_file} missing required fields (text, passed, evidence): {exp}") + 
result["expectations"] = raw_expectations + + # Extract notes from user_notes_summary + notes_summary = grading.get("user_notes_summary", {}) + notes = [] + notes.extend(notes_summary.get("uncertainties", [])) + notes.extend(notes_summary.get("needs_review", [])) + notes.extend(notes_summary.get("workarounds", [])) + result["notes"] = notes + + results[config].append(result) + + return results + + +def aggregate_results(results: dict) -> dict: + """ + Aggregate run results into summary statistics. + + Returns run_summary with stats for each configuration and delta. + """ + run_summary = {} + configs = list(results.keys()) + + for config in configs: + runs = results.get(config, []) + + if not runs: + run_summary[config] = { + "pass_rate": {"mean": 0.0, "stddev": 0.0, "min": 0.0, "max": 0.0}, + "time_seconds": {"mean": 0.0, "stddev": 0.0, "min": 0.0, "max": 0.0}, + "tokens": {"mean": 0, "stddev": 0, "min": 0, "max": 0} + } + continue + + pass_rates = [r["pass_rate"] for r in runs] + times = [r["time_seconds"] for r in runs] + tokens = [r.get("tokens", 0) for r in runs] + + run_summary[config] = { + "pass_rate": calculate_stats(pass_rates), + "time_seconds": calculate_stats(times), + "tokens": calculate_stats(tokens) + } + + # Calculate delta between the first two configs (if two exist) + if len(configs) >= 2: + primary = run_summary.get(configs[0], {}) + baseline = run_summary.get(configs[1], {}) + else: + primary = run_summary.get(configs[0], {}) if configs else {} + baseline = {} + + delta_pass_rate = primary.get("pass_rate", {}).get("mean", 0) - baseline.get("pass_rate", {}).get("mean", 0) + delta_time = primary.get("time_seconds", {}).get("mean", 0) - baseline.get("time_seconds", {}).get("mean", 0) + delta_tokens = primary.get("tokens", {}).get("mean", 0) - baseline.get("tokens", {}).get("mean", 0) + + run_summary["delta"] = { + "pass_rate": f"{delta_pass_rate:+.2f}", + "time_seconds": f"{delta_time:+.1f}", + "tokens": f"{delta_tokens:+.0f}" + } + + return 
run_summary + + +def generate_benchmark(benchmark_dir: Path, skill_name: str = "", skill_path: str = "") -> dict: + """ + Generate complete benchmark.json from run results. + """ + results = load_run_results(benchmark_dir) + run_summary = aggregate_results(results) + + # Build runs array for benchmark.json + runs = [] + for config in results: + for result in results[config]: + runs.append({ + "eval_id": result["eval_id"], + "configuration": config, + "run_number": result["run_number"], + "result": { + "pass_rate": result["pass_rate"], + "passed": result["passed"], + "failed": result["failed"], + "total": result["total"], + "time_seconds": result["time_seconds"], + "tokens": result.get("tokens", 0), + "tool_calls": result.get("tool_calls", 0), + "errors": result.get("errors", 0) + }, + "expectations": result["expectations"], + "notes": result["notes"] + }) + + # Determine eval IDs from results + eval_ids = sorted(set( + r["eval_id"] + for config in results.values() + for r in config + )) + + benchmark = { + "metadata": { + "skill_name": skill_name or "", + "skill_path": skill_path or "", + "executor_model": "", + "analyzer_model": "", + "timestamp": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"), + "evals_run": eval_ids, + "runs_per_configuration": 3 + }, + "runs": runs, + "run_summary": run_summary, + "notes": [] # To be filled by analyzer + } + + return benchmark + + +def generate_markdown(benchmark: dict) -> str: + """Generate human-readable benchmark.md from benchmark data.""" + metadata = benchmark["metadata"] + run_summary = benchmark["run_summary"] + + # Determine config names (excluding "delta") + configs = [k for k in run_summary if k != "delta"] + config_a = configs[0] if len(configs) >= 1 else "config_a" + config_b = configs[1] if len(configs) >= 2 else "config_b" + label_a = config_a.replace("_", " ").title() + label_b = config_b.replace("_", " ").title() + + lines = [ + f"# Skill Benchmark: {metadata['skill_name']}", + "", + f"**Model**: 
{metadata['executor_model']}", + f"**Date**: {metadata['timestamp']}", + f"**Evals**: {', '.join(map(str, metadata['evals_run']))} ({metadata['runs_per_configuration']} runs each per configuration)", + "", + "## Summary", + "", + f"| Metric | {label_a} | {label_b} | Delta |", + "|--------|------------|---------------|-------|", + ] + + a_summary = run_summary.get(config_a, {}) + b_summary = run_summary.get(config_b, {}) + delta = run_summary.get("delta", {}) + + # Format pass rate + a_pr = a_summary.get("pass_rate", {}) + b_pr = b_summary.get("pass_rate", {}) + lines.append(f"| Pass Rate | {a_pr.get('mean', 0)*100:.0f}% ± {a_pr.get('stddev', 0)*100:.0f}% | {b_pr.get('mean', 0)*100:.0f}% ± {b_pr.get('stddev', 0)*100:.0f}% | {delta.get('pass_rate', '—')} |") + + # Format time + a_time = a_summary.get("time_seconds", {}) + b_time = b_summary.get("time_seconds", {}) + lines.append(f"| Time | {a_time.get('mean', 0):.1f}s ± {a_time.get('stddev', 0):.1f}s | {b_time.get('mean', 0):.1f}s ± {b_time.get('stddev', 0):.1f}s | {delta.get('time_seconds', '—')}s |") + + # Format tokens + a_tokens = a_summary.get("tokens", {}) + b_tokens = b_summary.get("tokens", {}) + lines.append(f"| Tokens | {a_tokens.get('mean', 0):.0f} ± {a_tokens.get('stddev', 0):.0f} | {b_tokens.get('mean', 0):.0f} ± {b_tokens.get('stddev', 0):.0f} | {delta.get('tokens', '—')} |") + + # Notes section + if benchmark.get("notes"): + lines.extend([ + "", + "## Notes", + "" + ]) + for note in benchmark["notes"]: + lines.append(f"- {note}") + + return "\n".join(lines) + + +def main(): + parser = argparse.ArgumentParser( + description="Aggregate benchmark run results into summary statistics" + ) + parser.add_argument( + "benchmark_dir", + type=Path, + help="Path to the benchmark directory" + ) + parser.add_argument( + "--skill-name", + default="", + help="Name of the skill being benchmarked" + ) + parser.add_argument( + "--skill-path", + default="", + help="Path to the skill being benchmarked" + ) + 
parser.add_argument( + "--output", "-o", + type=Path, + help="Output path for benchmark.json (default: /benchmark.json)" + ) + + args = parser.parse_args() + + if not args.benchmark_dir.exists(): + print(f"Directory not found: {args.benchmark_dir}") + sys.exit(1) + + # Generate benchmark + benchmark = generate_benchmark(args.benchmark_dir, args.skill_name, args.skill_path) + + # Determine output paths + output_json = args.output or (args.benchmark_dir / "benchmark.json") + output_md = output_json.with_suffix(".md") + + # Write benchmark.json + with open(output_json, "w") as f: + json.dump(benchmark, f, indent=2) + print(f"Generated: {output_json}") + + # Write benchmark.md + markdown = generate_markdown(benchmark) + with open(output_md, "w") as f: + f.write(markdown) + print(f"Generated: {output_md}") + + # Print summary + run_summary = benchmark["run_summary"] + configs = [k for k in run_summary if k != "delta"] + delta = run_summary.get("delta", {}) + + print(f"\nSummary:") + for config in configs: + pr = run_summary[config]["pass_rate"]["mean"] + label = config.replace("_", " ").title() + print(f" {label}: {pr*100:.1f}% pass rate") + print(f" Delta: {delta.get('pass_rate', '—')}") + + +if __name__ == "__main__": + main() diff --git a/devcon/.agents/skills/skill-creator/scripts/generate_report.py b/devcon/.agents/skills/skill-creator/scripts/generate_report.py new file mode 100755 index 000000000..959e30a00 --- /dev/null +++ b/devcon/.agents/skills/skill-creator/scripts/generate_report.py @@ -0,0 +1,326 @@ +#!/usr/bin/env python3 +"""Generate an HTML report from run_loop.py output. + +Takes the JSON output from run_loop.py and generates a visual HTML report +showing each description attempt with check/x for each test case. +Distinguishes between train and test queries. 
+""" + +import argparse +import html +import json +import sys +from pathlib import Path + + +def generate_html(data: dict, auto_refresh: bool = False, skill_name: str = "") -> str: + """Generate HTML report from loop output data. If auto_refresh is True, adds a meta refresh tag.""" + history = data.get("history", []) + holdout = data.get("holdout", 0) + title_prefix = html.escape(skill_name + " \u2014 ") if skill_name else "" + + # Get all unique queries from train and test sets, with should_trigger info + train_queries: list[dict] = [] + test_queries: list[dict] = [] + if history: + for r in history[0].get("train_results", history[0].get("results", [])): + train_queries.append({"query": r["query"], "should_trigger": r.get("should_trigger", True)}) + if history[0].get("test_results"): + for r in history[0].get("test_results", []): + test_queries.append({"query": r["query"], "should_trigger": r.get("should_trigger", True)}) + + refresh_tag = ' \n' if auto_refresh else "" + + html_parts = [""" + + + +""" + refresh_tag + """ """ + title_prefix + """Skill Description Optimization + + + + + + +

""" + title_prefix + """Skill Description Optimization

+
+ Optimizing your skill's description. This page updates automatically as Claude tests different versions of your skill's description. Each row is an iteration — a new description attempt. The columns show test queries: green checkmarks mean the skill triggered correctly (or correctly didn't trigger), red crosses mean it got it wrong. The "Train" score shows performance on queries used to improve the description; the "Test" score shows performance on held-out queries the optimizer hasn't seen. When it's done, Claude will apply the best-performing description to your skill. +
+"""] + + # Summary section + best_test_score = data.get('best_test_score') + best_train_score = data.get('best_train_score') + html_parts.append(f""" +
+

Original: {html.escape(data.get('original_description', 'N/A'))}

+

Best: {html.escape(data.get('best_description', 'N/A'))}

+

Best Score: {data.get('best_score', 'N/A')} {'(test)' if best_test_score else '(train)'}

+

Iterations: {data.get('iterations_run', 0)} | Train: {data.get('train_size', '?')} | Test: {data.get('test_size', '?')}

+
+""") + + # Legend + html_parts.append(""" +
+ Query columns: + Should trigger + Should NOT trigger + Train + Test +
+""") + + # Table header + html_parts.append(""" +
+ + + + + + + +""") + + # Add column headers for train queries + for qinfo in train_queries: + polarity = "positive-col" if qinfo["should_trigger"] else "negative-col" + html_parts.append(f' \n') + + # Add column headers for test queries (different color) + for qinfo in test_queries: + polarity = "positive-col" if qinfo["should_trigger"] else "negative-col" + html_parts.append(f' \n') + + html_parts.append(""" + + +""") + + # Find best iteration for highlighting + if test_queries: + best_iter = max(history, key=lambda h: h.get("test_passed") or 0).get("iteration") + else: + best_iter = max(history, key=lambda h: h.get("train_passed", h.get("passed", 0))).get("iteration") + + # Add rows for each iteration + for h in history: + iteration = h.get("iteration", "?") + train_passed = h.get("train_passed", h.get("passed", 0)) + train_total = h.get("train_total", h.get("total", 0)) + test_passed = h.get("test_passed") + test_total = h.get("test_total") + description = h.get("description", "") + train_results = h.get("train_results", h.get("results", [])) + test_results = h.get("test_results", []) + + # Create lookups for results by query + train_by_query = {r["query"]: r for r in train_results} + test_by_query = {r["query"]: r for r in test_results} if test_results else {} + + # Compute aggregate correct/total runs across all retries + def aggregate_runs(results: list[dict]) -> tuple[int, int]: + correct = 0 + total = 0 + for r in results: + runs = r.get("runs", 0) + triggers = r.get("triggers", 0) + total += runs + if r.get("should_trigger", True): + correct += triggers + else: + correct += runs - triggers + return correct, total + + train_correct, train_runs = aggregate_runs(train_results) + test_correct, test_runs = aggregate_runs(test_results) + + # Determine score classes + def score_class(correct: int, total: int) -> str: + if total > 0: + ratio = correct / total + if ratio >= 0.8: + return "score-good" + elif ratio >= 0.5: + return "score-ok" + return "score-bad" + 
+ train_class = score_class(train_correct, train_runs) + test_class = score_class(test_correct, test_runs) + + row_class = "best-row" if iteration == best_iter else "" + + html_parts.append(f""" + + + + +""") + + # Add result for each train query + for qinfo in train_queries: + r = train_by_query.get(qinfo["query"], {}) + did_pass = r.get("pass", False) + triggers = r.get("triggers", 0) + runs = r.get("runs", 0) + + icon = "✓" if did_pass else "✗" + css_class = "pass" if did_pass else "fail" + + html_parts.append(f' \n') + + # Add result for each test query (with different background) + for qinfo in test_queries: + r = test_by_query.get(qinfo["query"], {}) + did_pass = r.get("pass", False) + triggers = r.get("triggers", 0) + runs = r.get("runs", 0) + + icon = "✓" if did_pass else "✗" + css_class = "pass" if did_pass else "fail" + + html_parts.append(f' \n') + + html_parts.append(" \n") + + html_parts.append(""" +
IterTrainTestDescription{html.escape(qinfo["query"])}{html.escape(qinfo["query"])}
{iteration}{train_correct}/{train_runs}{test_correct}/{test_runs}{html.escape(description)}{icon}{triggers}/{runs}{icon}{triggers}/{runs}
+
+""") + + html_parts.append(""" + + +""") + + return "".join(html_parts) + + +def main(): + parser = argparse.ArgumentParser(description="Generate HTML report from run_loop output") + parser.add_argument("input", help="Path to JSON output from run_loop.py (or - for stdin)") + parser.add_argument("-o", "--output", default=None, help="Output HTML file (default: stdout)") + parser.add_argument("--skill-name", default="", help="Skill name to include in the report title") + args = parser.parse_args() + + if args.input == "-": + data = json.load(sys.stdin) + else: + data = json.loads(Path(args.input).read_text()) + + html_output = generate_html(data, skill_name=args.skill_name) + + if args.output: + Path(args.output).write_text(html_output) + print(f"Report written to {args.output}", file=sys.stderr) + else: + print(html_output) + + +if __name__ == "__main__": + main() diff --git a/devcon/.agents/skills/skill-creator/scripts/improve_description.py b/devcon/.agents/skills/skill-creator/scripts/improve_description.py new file mode 100755 index 000000000..a270777b9 --- /dev/null +++ b/devcon/.agents/skills/skill-creator/scripts/improve_description.py @@ -0,0 +1,248 @@ +#!/usr/bin/env python3 +"""Improve a skill description based on eval results. + +Takes eval results (from run_eval.py) and generates an improved description +using Claude with extended thinking. 
+""" + +import argparse +import json +import re +import sys +from pathlib import Path + +import anthropic + +from scripts.utils import parse_skill_md + + +def improve_description( + client: anthropic.Anthropic, + skill_name: str, + skill_content: str, + current_description: str, + eval_results: dict, + history: list[dict], + model: str, + test_results: dict | None = None, + log_dir: Path | None = None, + iteration: int | None = None, +) -> str: + """Call Claude to improve the description based on eval results.""" + failed_triggers = [ + r for r in eval_results["results"] + if r["should_trigger"] and not r["pass"] + ] + false_triggers = [ + r for r in eval_results["results"] + if not r["should_trigger"] and not r["pass"] + ] + + # Build scores summary + train_score = f"{eval_results['summary']['passed']}/{eval_results['summary']['total']}" + if test_results: + test_score = f"{test_results['summary']['passed']}/{test_results['summary']['total']}" + scores_summary = f"Train: {train_score}, Test: {test_score}" + else: + scores_summary = f"Train: {train_score}" + + prompt = f"""You are optimizing a skill description for a Claude Code skill called "{skill_name}". A "skill" is sort of like a prompt, but with progressive disclosure -- there's a title and description that Claude sees when deciding whether to use the skill, and then if it does use the skill, it reads the .md file which has lots more details and potentially links to other resources in the skill folder like helper files and scripts and additional documentation or examples. + +The description appears in Claude's "available_skills" list. When a user sends a query, Claude decides whether to invoke the skill based solely on the title and on this description. Your goal is to write a description that triggers for relevant queries, and doesn't trigger for irrelevant ones. 
+ +Here's the current description: + +"{current_description}" + + +Current scores ({scores_summary}): + +""" + if failed_triggers: + prompt += "FAILED TO TRIGGER (should have triggered but didn't):\n" + for r in failed_triggers: + prompt += f' - "{r["query"]}" (triggered {r["triggers"]}/{r["runs"]} times)\n' + prompt += "\n" + + if false_triggers: + prompt += "FALSE TRIGGERS (triggered but shouldn't have):\n" + for r in false_triggers: + prompt += f' - "{r["query"]}" (triggered {r["triggers"]}/{r["runs"]} times)\n' + prompt += "\n" + + if history: + prompt += "PREVIOUS ATTEMPTS (do NOT repeat these — try something structurally different):\n\n" + for h in history: + train_s = f"{h.get('train_passed', h.get('passed', 0))}/{h.get('train_total', h.get('total', 0))}" + test_s = f"{h.get('test_passed', '?')}/{h.get('test_total', '?')}" if h.get('test_passed') is not None else None + score_str = f"train={train_s}" + (f", test={test_s}" if test_s else "") + prompt += f'\n' + prompt += f'Description: "{h["description"]}"\n' + if "results" in h: + prompt += "Train results:\n" + for r in h["results"]: + status = "PASS" if r["pass"] else "FAIL" + prompt += f' [{status}] "{r["query"][:80]}" (triggered {r["triggers"]}/{r["runs"]})\n' + if h.get("note"): + prompt += f'Note: {h["note"]}\n' + prompt += "\n\n" + + prompt += f""" + +Skill content (for context on what the skill does): + +{skill_content} + + +Based on the failures, write a new and improved description that is more likely to trigger correctly. When I say "based on the failures", it's a bit of a tricky line to walk because we don't want to overfit to the specific cases you're seeing. So what I DON'T want you to do is produce an ever-expanding list of specific queries that this skill should or shouldn't trigger for. Instead, try to generalize from the failures to broader categories of user intent and situations where this skill would be useful or not useful. The reason for this is twofold: + +1. Avoid overfitting +2. 
The list might get loooong and it's injected into ALL queries and there might be a lot of skills, so we don't want to blow too much space on any given description. + +Concretely, your description should not be more than about 100-200 words, even if that comes at the cost of accuracy. + +Here are some tips that we've found to work well in writing these descriptions: +- The skill should be phrased in the imperative -- "Use this skill for" rather than "this skill does" +- The skill description should focus on the user's intent, what they are trying to achieve, vs. the implementation details of how the skill works. +- The description competes with other skills for Claude's attention — make it distinctive and immediately recognizable. +- If you're getting lots of failures after repeated attempts, change things up. Try different sentence structures or wordings. + +I'd encourage you to be creative and mix up the style in different iterations since you'll have multiple opportunities to try different approaches and we'll just grab the highest-scoring one at the end. 
+ +Please respond with only the new description text in tags, nothing else.""" + + response = client.messages.create( + model=model, + max_tokens=16000, + thinking={ + "type": "enabled", + "budget_tokens": 10000, + }, + messages=[{"role": "user", "content": prompt}], + ) + + # Extract thinking and text from response + thinking_text = "" + text = "" + for block in response.content: + if block.type == "thinking": + thinking_text = block.thinking + elif block.type == "text": + text = block.text + + # Parse out the tags + match = re.search(r"(.*?)", text, re.DOTALL) + description = match.group(1).strip().strip('"') if match else text.strip().strip('"') + + # Log the transcript + transcript: dict = { + "iteration": iteration, + "prompt": prompt, + "thinking": thinking_text, + "response": text, + "parsed_description": description, + "char_count": len(description), + "over_limit": len(description) > 1024, + } + + # If over 1024 chars, ask the model to shorten it + if len(description) > 1024: + shorten_prompt = f"Your description is {len(description)} characters, which exceeds the hard 1024 character limit. Please rewrite it to be under 1024 characters while preserving the most important trigger words and intent coverage. Respond with only the new description in tags." 
+ shorten_response = client.messages.create( + model=model, + max_tokens=16000, + thinking={ + "type": "enabled", + "budget_tokens": 10000, + }, + messages=[ + {"role": "user", "content": prompt}, + {"role": "assistant", "content": text}, + {"role": "user", "content": shorten_prompt}, + ], + ) + + shorten_thinking = "" + shorten_text = "" + for block in shorten_response.content: + if block.type == "thinking": + shorten_thinking = block.thinking + elif block.type == "text": + shorten_text = block.text + + match = re.search(r"(.*?)", shorten_text, re.DOTALL) + shortened = match.group(1).strip().strip('"') if match else shorten_text.strip().strip('"') + + transcript["rewrite_prompt"] = shorten_prompt + transcript["rewrite_thinking"] = shorten_thinking + transcript["rewrite_response"] = shorten_text + transcript["rewrite_description"] = shortened + transcript["rewrite_char_count"] = len(shortened) + description = shortened + + transcript["final_description"] = description + + if log_dir: + log_dir.mkdir(parents=True, exist_ok=True) + log_file = log_dir / f"improve_iter_{iteration or 'unknown'}.json" + log_file.write_text(json.dumps(transcript, indent=2)) + + return description + + +def main(): + parser = argparse.ArgumentParser(description="Improve a skill description based on eval results") + parser.add_argument("--eval-results", required=True, help="Path to eval results JSON (from run_eval.py)") + parser.add_argument("--skill-path", required=True, help="Path to skill directory") + parser.add_argument("--history", default=None, help="Path to history JSON (previous attempts)") + parser.add_argument("--model", required=True, help="Model for improvement") + parser.add_argument("--verbose", action="store_true", help="Print thinking to stderr") + args = parser.parse_args() + + skill_path = Path(args.skill_path) + if not (skill_path / "SKILL.md").exists(): + print(f"Error: No SKILL.md found at {skill_path}", file=sys.stderr) + sys.exit(1) + + eval_results = 
json.loads(Path(args.eval_results).read_text()) + history = [] + if args.history: + history = json.loads(Path(args.history).read_text()) + + name, _, content = parse_skill_md(skill_path) + current_description = eval_results["description"] + + if args.verbose: + print(f"Current: {current_description}", file=sys.stderr) + print(f"Score: {eval_results['summary']['passed']}/{eval_results['summary']['total']}", file=sys.stderr) + + client = anthropic.Anthropic() + new_description = improve_description( + client=client, + skill_name=name, + skill_content=content, + current_description=current_description, + eval_results=eval_results, + history=history, + model=args.model, + ) + + if args.verbose: + print(f"Improved: {new_description}", file=sys.stderr) + + # Output as JSON with both the new description and updated history + output = { + "description": new_description, + "history": history + [{ + "description": current_description, + "passed": eval_results["summary"]["passed"], + "failed": eval_results["summary"]["failed"], + "total": eval_results["summary"]["total"], + "results": eval_results["results"], + }], + } + print(json.dumps(output, indent=2)) + + +if __name__ == "__main__": + main() diff --git a/devcon/.agents/skills/skill-creator/scripts/package_skill.py b/devcon/.agents/skills/skill-creator/scripts/package_skill.py new file mode 100755 index 000000000..f48eac444 --- /dev/null +++ b/devcon/.agents/skills/skill-creator/scripts/package_skill.py @@ -0,0 +1,136 @@ +#!/usr/bin/env python3 +""" +Skill Packager - Creates a distributable .skill file of a skill folder + +Usage: + python utils/package_skill.py [output-directory] + +Example: + python utils/package_skill.py skills/public/my-skill + python utils/package_skill.py skills/public/my-skill ./dist +""" + +import fnmatch +import sys +import zipfile +from pathlib import Path +from scripts.quick_validate import validate_skill + +# Patterns to exclude when packaging skills. 
+EXCLUDE_DIRS = {"__pycache__", "node_modules"} +EXCLUDE_GLOBS = {"*.pyc"} +EXCLUDE_FILES = {".DS_Store"} +# Directories excluded only at the skill root (not when nested deeper). +ROOT_EXCLUDE_DIRS = {"evals"} + + +def should_exclude(rel_path: Path) -> bool: + """Check if a path should be excluded from packaging.""" + parts = rel_path.parts + if any(part in EXCLUDE_DIRS for part in parts): + return True + # rel_path is relative to skill_path.parent, so parts[0] is the skill + # folder name and parts[1] (if present) is the first subdir. + if len(parts) > 1 and parts[1] in ROOT_EXCLUDE_DIRS: + return True + name = rel_path.name + if name in EXCLUDE_FILES: + return True + return any(fnmatch.fnmatch(name, pat) for pat in EXCLUDE_GLOBS) + + +def package_skill(skill_path, output_dir=None): + """ + Package a skill folder into a .skill file. + + Args: + skill_path: Path to the skill folder + output_dir: Optional output directory for the .skill file (defaults to current directory) + + Returns: + Path to the created .skill file, or None if error + """ + skill_path = Path(skill_path).resolve() + + # Validate skill folder exists + if not skill_path.exists(): + print(f"❌ Error: Skill folder not found: {skill_path}") + return None + + if not skill_path.is_dir(): + print(f"❌ Error: Path is not a directory: {skill_path}") + return None + + # Validate SKILL.md exists + skill_md = skill_path / "SKILL.md" + if not skill_md.exists(): + print(f"❌ Error: SKILL.md not found in {skill_path}") + return None + + # Run validation before packaging + print("🔍 Validating skill...") + valid, message = validate_skill(skill_path) + if not valid: + print(f"❌ Validation failed: {message}") + print(" Please fix the validation errors before packaging.") + return None + print(f"✅ {message}\n") + + # Determine output location + skill_name = skill_path.name + if output_dir: + output_path = Path(output_dir).resolve() + output_path.mkdir(parents=True, exist_ok=True) + else: + output_path = Path.cwd() + + 
skill_filename = output_path / f"{skill_name}.skill" + + # Create the .skill file (zip format) + try: + with zipfile.ZipFile(skill_filename, 'w', zipfile.ZIP_DEFLATED) as zipf: + # Walk through the skill directory, excluding build artifacts + for file_path in skill_path.rglob('*'): + if not file_path.is_file(): + continue + arcname = file_path.relative_to(skill_path.parent) + if should_exclude(arcname): + print(f" Skipped: {arcname}") + continue + zipf.write(file_path, arcname) + print(f" Added: {arcname}") + + print(f"\n✅ Successfully packaged skill to: {skill_filename}") + return skill_filename + + except Exception as e: + print(f"❌ Error creating .skill file: {e}") + return None + + +def main(): + if len(sys.argv) < 2: + print("Usage: python utils/package_skill.py [output-directory]") + print("\nExample:") + print(" python utils/package_skill.py skills/public/my-skill") + print(" python utils/package_skill.py skills/public/my-skill ./dist") + sys.exit(1) + + skill_path = sys.argv[1] + output_dir = sys.argv[2] if len(sys.argv) > 2 else None + + print(f"📦 Packaging skill: {skill_path}") + if output_dir: + print(f" Output directory: {output_dir}") + print() + + result = package_skill(skill_path, output_dir) + + if result: + sys.exit(0) + else: + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/devcon/.agents/skills/skill-creator/scripts/quick_validate.py b/devcon/.agents/skills/skill-creator/scripts/quick_validate.py new file mode 100755 index 000000000..ed8e1dddc --- /dev/null +++ b/devcon/.agents/skills/skill-creator/scripts/quick_validate.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python3 +""" +Quick validation script for skills - minimal version +""" + +import sys +import os +import re +import yaml +from pathlib import Path + +def validate_skill(skill_path): + """Basic validation of a skill""" + skill_path = Path(skill_path) + + # Check SKILL.md exists + skill_md = skill_path / 'SKILL.md' + if not skill_md.exists(): + return False, "SKILL.md not 
found" + + # Read and validate frontmatter + content = skill_md.read_text() + if not content.startswith('---'): + return False, "No YAML frontmatter found" + + # Extract frontmatter + match = re.match(r'^---\n(.*?)\n---', content, re.DOTALL) + if not match: + return False, "Invalid frontmatter format" + + frontmatter_text = match.group(1) + + # Parse YAML frontmatter + try: + frontmatter = yaml.safe_load(frontmatter_text) + if not isinstance(frontmatter, dict): + return False, "Frontmatter must be a YAML dictionary" + except yaml.YAMLError as e: + return False, f"Invalid YAML in frontmatter: {e}" + + # Define allowed properties + ALLOWED_PROPERTIES = {'name', 'description', 'license', 'allowed-tools', 'metadata', 'compatibility'} + + # Check for unexpected properties (excluding nested keys under metadata) + unexpected_keys = set(frontmatter.keys()) - ALLOWED_PROPERTIES + if unexpected_keys: + return False, ( + f"Unexpected key(s) in SKILL.md frontmatter: {', '.join(sorted(unexpected_keys))}. " + f"Allowed properties are: {', '.join(sorted(ALLOWED_PROPERTIES))}" + ) + + # Check required fields + if 'name' not in frontmatter: + return False, "Missing 'name' in frontmatter" + if 'description' not in frontmatter: + return False, "Missing 'description' in frontmatter" + + # Extract name for validation + name = frontmatter.get('name', '') + if not isinstance(name, str): + return False, f"Name must be a string, got {type(name).__name__}" + name = name.strip() + if name: + # Check naming convention (kebab-case: lowercase with hyphens) + if not re.match(r'^[a-z0-9-]+$', name): + return False, f"Name '{name}' should be kebab-case (lowercase letters, digits, and hyphens only)" + if name.startswith('-') or name.endswith('-') or '--' in name: + return False, f"Name '{name}' cannot start/end with hyphen or contain consecutive hyphens" + # Check name length (max 64 characters per spec) + if len(name) > 64: + return False, f"Name is too long ({len(name)} characters). 
Maximum is 64 characters." + + # Extract and validate description + description = frontmatter.get('description', '') + if not isinstance(description, str): + return False, f"Description must be a string, got {type(description).__name__}" + description = description.strip() + if description: + # Check for angle brackets + if '<' in description or '>' in description: + return False, "Description cannot contain angle brackets (< or >)" + # Check description length (max 1024 characters per spec) + if len(description) > 1024: + return False, f"Description is too long ({len(description)} characters). Maximum is 1024 characters." + + # Validate compatibility field if present (optional) + compatibility = frontmatter.get('compatibility', '') + if compatibility: + if not isinstance(compatibility, str): + return False, f"Compatibility must be a string, got {type(compatibility).__name__}" + if len(compatibility) > 500: + return False, f"Compatibility is too long ({len(compatibility)} characters). Maximum is 500 characters." + + return True, "Skill is valid!" + +if __name__ == "__main__": + if len(sys.argv) != 2: + print("Usage: python quick_validate.py ") + sys.exit(1) + + valid, message = validate_skill(sys.argv[1]) + print(message) + sys.exit(0 if valid else 1) \ No newline at end of file diff --git a/devcon/.agents/skills/skill-creator/scripts/run_eval.py b/devcon/.agents/skills/skill-creator/scripts/run_eval.py new file mode 100755 index 000000000..e58c70bea --- /dev/null +++ b/devcon/.agents/skills/skill-creator/scripts/run_eval.py @@ -0,0 +1,310 @@ +#!/usr/bin/env python3 +"""Run trigger evaluation for a skill description. + +Tests whether a skill's description causes Claude to trigger (read the skill) +for a set of queries. Outputs results as JSON. 
+""" + +import argparse +import json +import os +import select +import subprocess +import sys +import time +import uuid +from concurrent.futures import ProcessPoolExecutor, as_completed +from pathlib import Path + +from scripts.utils import parse_skill_md + + +def find_project_root() -> Path: + """Find the project root by walking up from cwd looking for .claude/. + + Mimics how Claude Code discovers its project root, so the command file + we create ends up where claude -p will look for it. + """ + current = Path.cwd() + for parent in [current, *current.parents]: + if (parent / ".claude").is_dir(): + return parent + return current + + +def run_single_query( + query: str, + skill_name: str, + skill_description: str, + timeout: int, + project_root: str, + model: str | None = None, +) -> bool: + """Run a single query and return whether the skill was triggered. + + Creates a command file in .claude/commands/ so it appears in Claude's + available_skills list, then runs `claude -p` with the raw query. + Uses --include-partial-messages to detect triggering early from + stream events (content_block_start) rather than waiting for the + full assistant message, which only arrives after tool execution. 
+ """ + unique_id = uuid.uuid4().hex[:8] + clean_name = f"{skill_name}-skill-{unique_id}" + project_commands_dir = Path(project_root) / ".claude" / "commands" + command_file = project_commands_dir / f"{clean_name}.md" + + try: + project_commands_dir.mkdir(parents=True, exist_ok=True) + # Use YAML block scalar to avoid breaking on quotes in description + indented_desc = "\n ".join(skill_description.split("\n")) + command_content = ( + f"---\n" + f"description: |\n" + f" {indented_desc}\n" + f"---\n\n" + f"# {skill_name}\n\n" + f"This skill handles: {skill_description}\n" + ) + command_file.write_text(command_content) + + cmd = [ + "claude", + "-p", query, + "--output-format", "stream-json", + "--verbose", + "--include-partial-messages", + ] + if model: + cmd.extend(["--model", model]) + + # Remove CLAUDECODE env var to allow nesting claude -p inside a + # Claude Code session. The guard is for interactive terminal conflicts; + # programmatic subprocess usage is safe. + env = {k: v for k, v in os.environ.items() if k != "CLAUDECODE"} + + process = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + cwd=project_root, + env=env, + ) + + triggered = False + start_time = time.time() + buffer = "" + # Track state for stream event detection + pending_tool_name = None + accumulated_json = "" + + try: + while time.time() - start_time < timeout: + if process.poll() is not None: + remaining = process.stdout.read() + if remaining: + buffer += remaining.decode("utf-8", errors="replace") + break + + ready, _, _ = select.select([process.stdout], [], [], 1.0) + if not ready: + continue + + chunk = os.read(process.stdout.fileno(), 8192) + if not chunk: + break + buffer += chunk.decode("utf-8", errors="replace") + + while "\n" in buffer: + line, buffer = buffer.split("\n", 1) + line = line.strip() + if not line: + continue + + try: + event = json.loads(line) + except json.JSONDecodeError: + continue + + # Early detection via stream events + if 
event.get("type") == "stream_event": + se = event.get("event", {}) + se_type = se.get("type", "") + + if se_type == "content_block_start": + cb = se.get("content_block", {}) + if cb.get("type") == "tool_use": + tool_name = cb.get("name", "") + if tool_name in ("Skill", "Read"): + pending_tool_name = tool_name + accumulated_json = "" + else: + return False + + elif se_type == "content_block_delta" and pending_tool_name: + delta = se.get("delta", {}) + if delta.get("type") == "input_json_delta": + accumulated_json += delta.get("partial_json", "") + if clean_name in accumulated_json: + return True + + elif se_type in ("content_block_stop", "message_stop"): + if pending_tool_name: + return clean_name in accumulated_json + if se_type == "message_stop": + return False + + # Fallback: full assistant message + elif event.get("type") == "assistant": + message = event.get("message", {}) + for content_item in message.get("content", []): + if content_item.get("type") != "tool_use": + continue + tool_name = content_item.get("name", "") + tool_input = content_item.get("input", {}) + if tool_name == "Skill" and clean_name in tool_input.get("skill", ""): + triggered = True + elif tool_name == "Read" and clean_name in tool_input.get("file_path", ""): + triggered = True + return triggered + + elif event.get("type") == "result": + return triggered + finally: + # Clean up process on any exit path (return, exception, timeout) + if process.poll() is None: + process.kill() + process.wait() + + return triggered + finally: + if command_file.exists(): + command_file.unlink() + + +def run_eval( + eval_set: list[dict], + skill_name: str, + description: str, + num_workers: int, + timeout: int, + project_root: Path, + runs_per_query: int = 1, + trigger_threshold: float = 0.5, + model: str | None = None, +) -> dict: + """Run the full eval set and return results.""" + results = [] + + with ProcessPoolExecutor(max_workers=num_workers) as executor: + future_to_info = {} + for item in eval_set: + 
for run_idx in range(runs_per_query): + future = executor.submit( + run_single_query, + item["query"], + skill_name, + description, + timeout, + str(project_root), + model, + ) + future_to_info[future] = (item, run_idx) + + query_triggers: dict[str, list[bool]] = {} + query_items: dict[str, dict] = {} + for future in as_completed(future_to_info): + item, _ = future_to_info[future] + query = item["query"] + query_items[query] = item + if query not in query_triggers: + query_triggers[query] = [] + try: + query_triggers[query].append(future.result()) + except Exception as e: + print(f"Warning: query failed: {e}", file=sys.stderr) + query_triggers[query].append(False) + + for query, triggers in query_triggers.items(): + item = query_items[query] + trigger_rate = sum(triggers) / len(triggers) + should_trigger = item["should_trigger"] + if should_trigger: + did_pass = trigger_rate >= trigger_threshold + else: + did_pass = trigger_rate < trigger_threshold + results.append({ + "query": query, + "should_trigger": should_trigger, + "trigger_rate": trigger_rate, + "triggers": sum(triggers), + "runs": len(triggers), + "pass": did_pass, + }) + + passed = sum(1 for r in results if r["pass"]) + total = len(results) + + return { + "skill_name": skill_name, + "description": description, + "results": results, + "summary": { + "total": total, + "passed": passed, + "failed": total - passed, + }, + } + + +def main(): + parser = argparse.ArgumentParser(description="Run trigger evaluation for a skill description") + parser.add_argument("--eval-set", required=True, help="Path to eval set JSON file") + parser.add_argument("--skill-path", required=True, help="Path to skill directory") + parser.add_argument("--description", default=None, help="Override description to test") + parser.add_argument("--num-workers", type=int, default=10, help="Number of parallel workers") + parser.add_argument("--timeout", type=int, default=30, help="Timeout per query in seconds") + 
parser.add_argument("--runs-per-query", type=int, default=3, help="Number of runs per query") + parser.add_argument("--trigger-threshold", type=float, default=0.5, help="Trigger rate threshold") + parser.add_argument("--model", default=None, help="Model to use for claude -p (default: user's configured model)") + parser.add_argument("--verbose", action="store_true", help="Print progress to stderr") + args = parser.parse_args() + + eval_set = json.loads(Path(args.eval_set).read_text()) + skill_path = Path(args.skill_path) + + if not (skill_path / "SKILL.md").exists(): + print(f"Error: No SKILL.md found at {skill_path}", file=sys.stderr) + sys.exit(1) + + name, original_description, content = parse_skill_md(skill_path) + description = args.description or original_description + project_root = find_project_root() + + if args.verbose: + print(f"Evaluating: {description}", file=sys.stderr) + + output = run_eval( + eval_set=eval_set, + skill_name=name, + description=description, + num_workers=args.num_workers, + timeout=args.timeout, + project_root=project_root, + runs_per_query=args.runs_per_query, + trigger_threshold=args.trigger_threshold, + model=args.model, + ) + + if args.verbose: + summary = output["summary"] + print(f"Results: {summary['passed']}/{summary['total']} passed", file=sys.stderr) + for r in output["results"]: + status = "PASS" if r["pass"] else "FAIL" + rate_str = f"{r['triggers']}/{r['runs']}" + print(f" [{status}] rate={rate_str} expected={r['should_trigger']}: {r['query'][:70]}", file=sys.stderr) + + print(json.dumps(output, indent=2)) + + +if __name__ == "__main__": + main() diff --git a/devcon/.agents/skills/skill-creator/scripts/run_loop.py b/devcon/.agents/skills/skill-creator/scripts/run_loop.py new file mode 100755 index 000000000..36f9b4e05 --- /dev/null +++ b/devcon/.agents/skills/skill-creator/scripts/run_loop.py @@ -0,0 +1,332 @@ +#!/usr/bin/env python3 +"""Run the eval + improve loop until all pass or max iterations reached. 
+ +Combines run_eval.py and improve_description.py in a loop, tracking history +and returning the best description found. Supports train/test split to prevent +overfitting. +""" + +import argparse +import json +import random +import sys +import tempfile +import time +import webbrowser +from pathlib import Path + +import anthropic + +from scripts.generate_report import generate_html +from scripts.improve_description import improve_description +from scripts.run_eval import find_project_root, run_eval +from scripts.utils import parse_skill_md + + +def split_eval_set(eval_set: list[dict], holdout: float, seed: int = 42) -> tuple[list[dict], list[dict]]: + """Split eval set into train and test sets, stratified by should_trigger.""" + random.seed(seed) + + # Separate by should_trigger + trigger = [e for e in eval_set if e["should_trigger"]] + no_trigger = [e for e in eval_set if not e["should_trigger"]] + + # Shuffle each group + random.shuffle(trigger) + random.shuffle(no_trigger) + + # Calculate split points + n_trigger_test = max(1, int(len(trigger) * holdout)) + n_no_trigger_test = max(1, int(len(no_trigger) * holdout)) + + # Split + test_set = trigger[:n_trigger_test] + no_trigger[:n_no_trigger_test] + train_set = trigger[n_trigger_test:] + no_trigger[n_no_trigger_test:] + + return train_set, test_set + + +def run_loop( + eval_set: list[dict], + skill_path: Path, + description_override: str | None, + num_workers: int, + timeout: int, + max_iterations: int, + runs_per_query: int, + trigger_threshold: float, + holdout: float, + model: str, + verbose: bool, + live_report_path: Path | None = None, + log_dir: Path | None = None, +) -> dict: + """Run the eval + improvement loop.""" + project_root = find_project_root() + name, original_description, content = parse_skill_md(skill_path) + current_description = description_override or original_description + + # Split into train/test if holdout > 0 + if holdout > 0: + train_set, test_set = split_eval_set(eval_set, holdout) + 
if verbose: + print(f"Split: {len(train_set)} train, {len(test_set)} test (holdout={holdout})", file=sys.stderr) + else: + train_set = eval_set + test_set = [] + + client = anthropic.Anthropic() + history = [] + exit_reason = "unknown" + + for iteration in range(1, max_iterations + 1): + if verbose: + print(f"\n{'='*60}", file=sys.stderr) + print(f"Iteration {iteration}/{max_iterations}", file=sys.stderr) + print(f"Description: {current_description}", file=sys.stderr) + print(f"{'='*60}", file=sys.stderr) + + # Evaluate train + test together in one batch for parallelism + all_queries = train_set + test_set + t0 = time.time() + all_results = run_eval( + eval_set=all_queries, + skill_name=name, + description=current_description, + num_workers=num_workers, + timeout=timeout, + project_root=project_root, + runs_per_query=runs_per_query, + trigger_threshold=trigger_threshold, + model=model, + ) + eval_elapsed = time.time() - t0 + + # Split results back into train/test by matching queries + train_queries_set = {q["query"] for q in train_set} + train_result_list = [r for r in all_results["results"] if r["query"] in train_queries_set] + test_result_list = [r for r in all_results["results"] if r["query"] not in train_queries_set] + + train_passed = sum(1 for r in train_result_list if r["pass"]) + train_total = len(train_result_list) + train_summary = {"passed": train_passed, "failed": train_total - train_passed, "total": train_total} + train_results = {"results": train_result_list, "summary": train_summary} + + if test_set: + test_passed = sum(1 for r in test_result_list if r["pass"]) + test_total = len(test_result_list) + test_summary = {"passed": test_passed, "failed": test_total - test_passed, "total": test_total} + test_results = {"results": test_result_list, "summary": test_summary} + else: + test_results = None + test_summary = None + + history.append({ + "iteration": iteration, + "description": current_description, + "train_passed": train_summary["passed"], + 
"train_failed": train_summary["failed"], + "train_total": train_summary["total"], + "train_results": train_results["results"], + "test_passed": test_summary["passed"] if test_summary else None, + "test_failed": test_summary["failed"] if test_summary else None, + "test_total": test_summary["total"] if test_summary else None, + "test_results": test_results["results"] if test_results else None, + # For backward compat with report generator + "passed": train_summary["passed"], + "failed": train_summary["failed"], + "total": train_summary["total"], + "results": train_results["results"], + }) + + # Write live report if path provided + if live_report_path: + partial_output = { + "original_description": original_description, + "best_description": current_description, + "best_score": "in progress", + "iterations_run": len(history), + "holdout": holdout, + "train_size": len(train_set), + "test_size": len(test_set), + "history": history, + } + live_report_path.write_text(generate_html(partial_output, auto_refresh=True, skill_name=name)) + + if verbose: + def print_eval_stats(label, results, elapsed): + pos = [r for r in results if r["should_trigger"]] + neg = [r for r in results if not r["should_trigger"]] + tp = sum(r["triggers"] for r in pos) + pos_runs = sum(r["runs"] for r in pos) + fn = pos_runs - tp + fp = sum(r["triggers"] for r in neg) + neg_runs = sum(r["runs"] for r in neg) + tn = neg_runs - fp + total = tp + tn + fp + fn + precision = tp / (tp + fp) if (tp + fp) > 0 else 1.0 + recall = tp / (tp + fn) if (tp + fn) > 0 else 1.0 + accuracy = (tp + tn) / total if total > 0 else 0.0 + print(f"{label}: {tp+tn}/{total} correct, precision={precision:.0%} recall={recall:.0%} accuracy={accuracy:.0%} ({elapsed:.1f}s)", file=sys.stderr) + for r in results: + status = "PASS" if r["pass"] else "FAIL" + rate_str = f"{r['triggers']}/{r['runs']}" + print(f" [{status}] rate={rate_str} expected={r['should_trigger']}: {r['query'][:60]}", file=sys.stderr) + + print_eval_stats("Train", 
train_results["results"], eval_elapsed) + if test_summary: + print_eval_stats("Test ", test_results["results"], 0) + + if train_summary["failed"] == 0: + exit_reason = f"all_passed (iteration {iteration})" + if verbose: + print(f"\nAll train queries passed on iteration {iteration}!", file=sys.stderr) + break + + if iteration == max_iterations: + exit_reason = f"max_iterations ({max_iterations})" + if verbose: + print(f"\nMax iterations reached ({max_iterations}).", file=sys.stderr) + break + + # Improve the description based on train results + if verbose: + print(f"\nImproving description...", file=sys.stderr) + + t0 = time.time() + # Strip test scores from history so improvement model can't see them + blinded_history = [ + {k: v for k, v in h.items() if not k.startswith("test_")} + for h in history + ] + new_description = improve_description( + client=client, + skill_name=name, + skill_content=content, + current_description=current_description, + eval_results=train_results, + history=blinded_history, + model=model, + log_dir=log_dir, + iteration=iteration, + ) + improve_elapsed = time.time() - t0 + + if verbose: + print(f"Proposed ({improve_elapsed:.1f}s): {new_description}", file=sys.stderr) + + current_description = new_description + + # Find the best iteration by TEST score (or train if no test set) + if test_set: + best = max(history, key=lambda h: h["test_passed"] or 0) + best_score = f"{best['test_passed']}/{best['test_total']}" + else: + best = max(history, key=lambda h: h["train_passed"]) + best_score = f"{best['train_passed']}/{best['train_total']}" + + if verbose: + print(f"\nExit reason: {exit_reason}", file=sys.stderr) + print(f"Best score: {best_score} (iteration {best['iteration']})", file=sys.stderr) + + return { + "exit_reason": exit_reason, + "original_description": original_description, + "best_description": best["description"], + "best_score": best_score, + "best_train_score": f"{best['train_passed']}/{best['train_total']}", + "best_test_score": 
f"{best['test_passed']}/{best['test_total']}" if test_set else None, + "final_description": current_description, + "iterations_run": len(history), + "holdout": holdout, + "train_size": len(train_set), + "test_size": len(test_set), + "history": history, + } + + +def main(): + parser = argparse.ArgumentParser(description="Run eval + improve loop") + parser.add_argument("--eval-set", required=True, help="Path to eval set JSON file") + parser.add_argument("--skill-path", required=True, help="Path to skill directory") + parser.add_argument("--description", default=None, help="Override starting description") + parser.add_argument("--num-workers", type=int, default=10, help="Number of parallel workers") + parser.add_argument("--timeout", type=int, default=30, help="Timeout per query in seconds") + parser.add_argument("--max-iterations", type=int, default=5, help="Max improvement iterations") + parser.add_argument("--runs-per-query", type=int, default=3, help="Number of runs per query") + parser.add_argument("--trigger-threshold", type=float, default=0.5, help="Trigger rate threshold") + parser.add_argument("--holdout", type=float, default=0.4, help="Fraction of eval set to hold out for testing (0 to disable)") + parser.add_argument("--model", required=True, help="Model for improvement") + parser.add_argument("--verbose", action="store_true", help="Print progress to stderr") + parser.add_argument("--report", default="auto", help="Generate HTML report at this path (default: 'auto' for temp file, 'none' to disable)") + parser.add_argument("--results-dir", default=None, help="Save all outputs (results.json, report.html, log.txt) to a timestamped subdirectory here") + args = parser.parse_args() + + eval_set = json.loads(Path(args.eval_set).read_text()) + skill_path = Path(args.skill_path) + + if not (skill_path / "SKILL.md").exists(): + print(f"Error: No SKILL.md found at {skill_path}", file=sys.stderr) + sys.exit(1) + + name, _, _ = parse_skill_md(skill_path) + + # Set up 
live report path + if args.report != "none": + if args.report == "auto": + timestamp = time.strftime("%Y%m%d_%H%M%S") + live_report_path = Path(tempfile.gettempdir()) / f"skill_description_report_{skill_path.name}_{timestamp}.html" + else: + live_report_path = Path(args.report) + # Open the report immediately so the user can watch + live_report_path.write_text("

Starting optimization loop...

") + webbrowser.open(str(live_report_path)) + else: + live_report_path = None + + # Determine output directory (create before run_loop so logs can be written) + if args.results_dir: + timestamp = time.strftime("%Y-%m-%d_%H%M%S") + results_dir = Path(args.results_dir) / timestamp + results_dir.mkdir(parents=True, exist_ok=True) + else: + results_dir = None + + log_dir = results_dir / "logs" if results_dir else None + + output = run_loop( + eval_set=eval_set, + skill_path=skill_path, + description_override=args.description, + num_workers=args.num_workers, + timeout=args.timeout, + max_iterations=args.max_iterations, + runs_per_query=args.runs_per_query, + trigger_threshold=args.trigger_threshold, + holdout=args.holdout, + model=args.model, + verbose=args.verbose, + live_report_path=live_report_path, + log_dir=log_dir, + ) + + # Save JSON output + json_output = json.dumps(output, indent=2) + print(json_output) + if results_dir: + (results_dir / "results.json").write_text(json_output) + + # Write final HTML report (without auto-refresh) + if live_report_path: + live_report_path.write_text(generate_html(output, auto_refresh=False, skill_name=name)) + print(f"\nReport: {live_report_path}", file=sys.stderr) + + if results_dir and live_report_path: + (results_dir / "report.html").write_text(generate_html(output, auto_refresh=False, skill_name=name)) + + if results_dir: + print(f"Results saved to: {results_dir}", file=sys.stderr) + + +if __name__ == "__main__": + main() diff --git a/devcon/.agents/skills/skill-creator/scripts/utils.py b/devcon/.agents/skills/skill-creator/scripts/utils.py new file mode 100644 index 000000000..51b6a07dd --- /dev/null +++ b/devcon/.agents/skills/skill-creator/scripts/utils.py @@ -0,0 +1,47 @@ +"""Shared utilities for skill-creator scripts.""" + +from pathlib import Path + + + +def parse_skill_md(skill_path: Path) -> tuple[str, str, str]: + """Parse a SKILL.md file, returning (name, description, full_content).""" + content = (skill_path / 
"SKILL.md").read_text() + lines = content.split("\n") + + if lines[0].strip() != "---": + raise ValueError("SKILL.md missing frontmatter (no opening ---)") + + end_idx = None + for i, line in enumerate(lines[1:], start=1): + if line.strip() == "---": + end_idx = i + break + + if end_idx is None: + raise ValueError("SKILL.md missing frontmatter (no closing ---)") + + name = "" + description = "" + frontmatter_lines = lines[1:end_idx] + i = 0 + while i < len(frontmatter_lines): + line = frontmatter_lines[i] + if line.startswith("name:"): + name = line[len("name:"):].strip().strip('"').strip("'") + elif line.startswith("description:"): + value = line[len("description:"):].strip() + # Handle YAML multiline indicators (>, |, >-, |-) + if value in (">", "|", ">-", "|-"): + continuation_lines: list[str] = [] + i += 1 + while i < len(frontmatter_lines) and (frontmatter_lines[i].startswith(" ") or frontmatter_lines[i].startswith("\t")): + continuation_lines.append(frontmatter_lines[i].strip()) + i += 1 + description = " ".join(continuation_lines) + continue + else: + description = value.strip('"').strip("'") + i += 1 + + return name, description, content diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/AGENTS.md b/devcon/.agents/skills/supabase-postgres-best-practices/AGENTS.md new file mode 100644 index 000000000..a7baf4450 --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/AGENTS.md @@ -0,0 +1,68 @@ +# Supabase Postgres Best Practices + +## Structure + +``` +supabase-postgres-best-practices/ + SKILL.md # Main skill file - read this first + AGENTS.md # This navigation guide + CLAUDE.md # Symlink to AGENTS.md + references/ # Detailed reference files +``` + +## Usage + +1. Read `SKILL.md` for the main skill instructions +2. Browse `references/` for detailed documentation on specific topics +3. 
Reference files are loaded on-demand - read only what you need + +Comprehensive performance optimization guide for Postgres, maintained by Supabase. Contains rules across 8 categories, prioritized by impact to guide automated query optimization and schema design. + +## When to Apply + +Reference these guidelines when: +- Writing SQL queries or designing schemas +- Implementing indexes or query optimization +- Reviewing database performance issues +- Configuring connection pooling or scaling +- Optimizing for Postgres-specific features +- Working with Row-Level Security (RLS) + +## Rule Categories by Priority + +| Priority | Category | Impact | Prefix | +|----------|----------|--------|--------| +| 1 | Query Performance | CRITICAL | `query-` | +| 2 | Connection Management | CRITICAL | `conn-` | +| 3 | Security & RLS | CRITICAL | `security-` | +| 4 | Schema Design | HIGH | `schema-` | +| 5 | Concurrency & Locking | MEDIUM-HIGH | `lock-` | +| 6 | Data Access Patterns | MEDIUM | `data-` | +| 7 | Monitoring & Diagnostics | LOW-MEDIUM | `monitor-` | +| 8 | Advanced Features | LOW | `advanced-` | + +## How to Use + +Read individual rule files for detailed explanations and SQL examples: + +``` +references/query-missing-indexes.md +references/schema-partial-indexes.md +references/_sections.md +``` + +Each rule file contains: +- Brief explanation of why it matters +- Incorrect SQL example with explanation +- Correct SQL example with explanation +- Optional EXPLAIN output or metrics +- Additional context and references +- Supabase-specific notes (when applicable) + +## References + +- https://www.postgresql.org/docs/current/ +- https://supabase.com/docs +- https://wiki.postgresql.org/wiki/Performance_Optimization +- https://supabase.com/docs/guides/database/overview +- https://supabase.com/docs/guides/auth/row-level-security diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/CLAUDE.md b/devcon/.agents/skills/supabase-postgres-best-practices/CLAUDE.md new file 
mode 100644 index 000000000..a7baf4450 --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/CLAUDE.md @@ -0,0 +1,68 @@ +# Supabase Postgres Best Practices + +## Structure + +``` +supabase-postgres-best-practices/ + SKILL.md # Main skill file - read this first + AGENTS.md # This navigation guide + CLAUDE.md # Symlink to AGENTS.md + references/ # Detailed reference files +``` + +## Usage + +1. Read `SKILL.md` for the main skill instructions +2. Browse `references/` for detailed documentation on specific topics +3. Reference files are loaded on-demand - read only what you need + +Comprehensive performance optimization guide for Postgres, maintained by Supabase. Contains rules across 8 categories, prioritized by impact to guide automated query optimization and schema design. + +## When to Apply + +Reference these guidelines when: +- Writing SQL queries or designing schemas +- Implementing indexes or query optimization +- Reviewing database performance issues +- Configuring connection pooling or scaling +- Optimizing for Postgres-specific features +- Working with Row-Level Security (RLS) + +## Rule Categories by Priority + +| Priority | Category | Impact | Prefix | +|----------|----------|--------|--------| +| 1 | Query Performance | CRITICAL | `query-` | +| 2 | Connection Management | CRITICAL | `conn-` | +| 3 | Security & RLS | CRITICAL | `security-` | +| 4 | Schema Design | HIGH | `schema-` | +| 5 | Concurrency & Locking | MEDIUM-HIGH | `lock-` | +| 6 | Data Access Patterns | MEDIUM | `data-` | +| 7 | Monitoring & Diagnostics | LOW-MEDIUM | `monitor-` | +| 8 | Advanced Features | LOW | `advanced-` | + +## How to Use + +Read individual rule files for detailed explanations and SQL examples: + +``` +references/query-missing-indexes.md +references/schema-partial-indexes.md +references/_sections.md +``` + +Each rule file contains: +- Brief explanation of why it matters +- Incorrect SQL example with explanation +- Correct SQL example with explanation 
+- Optional EXPLAIN output or metrics +- Additional context and references +- Supabase-specific notes (when applicable) + +## References + +- https://www.postgresql.org/docs/current/ +- https://supabase.com/docs +- https://wiki.postgresql.org/wiki/Performance_Optimization +- https://supabase.com/docs/guides/database/overview +- https://supabase.com/docs/guides/auth/row-level-security diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/README.md b/devcon/.agents/skills/supabase-postgres-best-practices/README.md new file mode 100644 index 000000000..f1a374e11 --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/README.md @@ -0,0 +1,116 @@ +# Supabase Postgres Best Practices - Contributor Guide + +This skill contains Postgres performance optimization references optimized for +AI agents and LLMs. It follows the [Agent Skills Open Standard](https://agentskills.io/). + +## Quick Start + +```bash +# From repository root +npm install + +# Validate existing references +npm run validate + +# Build AGENTS.md +npm run build +``` + +## Creating a New Reference + +1. **Choose a section prefix** based on the category: + - `query-` Query Performance (CRITICAL) + - `conn-` Connection Management (CRITICAL) + - `security-` Security & RLS (CRITICAL) + - `schema-` Schema Design (HIGH) + - `lock-` Concurrency & Locking (MEDIUM-HIGH) + - `data-` Data Access Patterns (MEDIUM) + - `monitor-` Monitoring & Diagnostics (LOW-MEDIUM) + - `advanced-` Advanced Features (LOW) + +2. **Copy the template**: + ```bash + cp references/_template.md references/query-your-reference-name.md + ``` + +3. **Fill in the content** following the template structure + +4. **Validate and build**: + ```bash + npm run validate + npm run build + ``` + +5. 
+**Review** the generated `AGENTS.md`
+
+## Skill Structure
+
+```
+skills/supabase-postgres-best-practices/
+├── SKILL.md              # Agent-facing skill manifest (Agent Skills spec)
+├── AGENTS.md             # [GENERATED] Compiled references document
+├── README.md             # This file
+└── references/
+    ├── _template.md      # Reference template
+    ├── _sections.md      # Section definitions
+    ├── _contributing.md  # Writing guidelines
+    └── *.md              # Individual references
+
+packages/skills-build/
+├── src/                  # Generic build system source
+└── package.json          # NPM scripts
+```
+
+## Reference File Structure
+
+See `references/_template.md` for the complete template. Key elements:
+
+````markdown
+---
+title: Clear, Action-Oriented Title
+impact: CRITICAL|HIGH|MEDIUM-HIGH|MEDIUM|LOW-MEDIUM|LOW
+impactDescription: Quantified benefit (e.g., "10-100x faster")
+tags: relevant, keywords
+---
+
+## [Title]
+
+[1-2 sentence explanation]
+
+**Incorrect (description):**
+
+```sql
+-- Comment explaining what's wrong
+[Bad SQL example]
+```
+
+**Correct (description):**
+
+```sql
+-- Comment explaining why this is better
+[Good SQL example]
+```
+````
+
+## Writing Guidelines
+
+See `references/_contributing.md` for detailed guidelines. Key principles:
+
+1. **Show concrete transformations** - "Change X to Y", not abstract advice
+2. **Error-first structure** - Show the problem before the solution
+3. **Quantify impact** - Include specific metrics (10x faster, 50% smaller)
+4. **Self-contained examples** - Complete, runnable SQL
+5. **Semantic naming** - Use meaningful names (users, email), not (table1, col1)
+
+## Impact Levels
+
+| Level | Improvement | Examples |
+|-------|-------------|----------|
+| CRITICAL | 10-100x | Missing indexes, connection exhaustion |
+| HIGH | 5-20x | Wrong index types, poor partitioning |
+| MEDIUM-HIGH | 2-5x | N+1 queries, RLS optimization |
+| MEDIUM | 1.5-3x | Redundant indexes, stale statistics |
+| LOW-MEDIUM | 1.2-2x | VACUUM tuning, config tweaks |
+| LOW | Incremental | Advanced patterns, edge cases |
diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/SKILL.md b/devcon/.agents/skills/supabase-postgres-best-practices/SKILL.md
new file mode 100644
index 000000000..f80be1562
--- /dev/null
+++ b/devcon/.agents/skills/supabase-postgres-best-practices/SKILL.md
@@ -0,0 +1,64 @@
+---
+name: supabase-postgres-best-practices
+description: Postgres performance optimization and best practices from Supabase. Use this skill when writing, reviewing, or optimizing Postgres queries, schema designs, or database configurations.
+license: MIT
+metadata:
+  author: supabase
+  version: "1.1.0"
+  organization: Supabase
+  date: January 2026
+  abstract: Comprehensive Postgres performance optimization guide for developers using Supabase and Postgres. Contains performance rules across 8 categories, prioritized by impact from critical (query performance, connection management) to incremental (advanced features). Each rule includes detailed explanations, incorrect vs. correct SQL examples, query plan analysis, and specific performance metrics to guide automated optimization and code generation.
+---
+
+# Supabase Postgres Best Practices
+
+Comprehensive performance optimization guide for Postgres, maintained by Supabase. Contains rules across 8 categories, prioritized by impact to guide automated query optimization and schema design.
+ +## When to Apply + +Reference these guidelines when: +- Writing SQL queries or designing schemas +- Implementing indexes or query optimization +- Reviewing database performance issues +- Configuring connection pooling or scaling +- Optimizing for Postgres-specific features +- Working with Row-Level Security (RLS) + +## Rule Categories by Priority + +| Priority | Category | Impact | Prefix | +|----------|----------|--------|--------| +| 1 | Query Performance | CRITICAL | `query-` | +| 2 | Connection Management | CRITICAL | `conn-` | +| 3 | Security & RLS | CRITICAL | `security-` | +| 4 | Schema Design | HIGH | `schema-` | +| 5 | Concurrency & Locking | MEDIUM-HIGH | `lock-` | +| 6 | Data Access Patterns | MEDIUM | `data-` | +| 7 | Monitoring & Diagnostics | LOW-MEDIUM | `monitor-` | +| 8 | Advanced Features | LOW | `advanced-` | + +## How to Use + +Read individual rule files for detailed explanations and SQL examples: + +``` +references/query-missing-indexes.md +references/schema-partial-indexes.md +references/_sections.md +``` + +Each rule file contains: +- Brief explanation of why it matters +- Incorrect SQL example with explanation +- Correct SQL example with explanation +- Optional EXPLAIN output or metrics +- Additional context and references +- Supabase-specific notes (when applicable) + +## References + +- https://www.postgresql.org/docs/current/ +- https://supabase.com/docs +- https://wiki.postgresql.org/wiki/Performance_Optimization +- https://supabase.com/docs/guides/database/overview +- https://supabase.com/docs/guides/auth/row-level-security diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md new file mode 100644 index 000000000..582cbeaad --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/advanced-full-text-search.md @@ -0,0 +1,55 @@ +--- +title: Use tsvector for 
Full-Text Search +impact: MEDIUM +impactDescription: 100x faster than LIKE, with ranking support +tags: full-text-search, tsvector, gin, search +--- + +## Use tsvector for Full-Text Search + +LIKE with wildcards can't use indexes. Full-text search with tsvector is orders of magnitude faster. + +**Incorrect (LIKE pattern matching):** + +```sql +-- Cannot use index, scans all rows +select * from articles where content like '%postgresql%'; + +-- Case-insensitive makes it worse +select * from articles where lower(content) like '%postgresql%'; +``` + +**Correct (full-text search with tsvector):** + +```sql +-- Add tsvector column and index +alter table articles add column search_vector tsvector + generated always as (to_tsvector('english', coalesce(title,'') || ' ' || coalesce(content,''))) stored; + +create index articles_search_idx on articles using gin (search_vector); + +-- Fast full-text search +select * from articles +where search_vector @@ to_tsquery('english', 'postgresql & performance'); + +-- With ranking +select *, ts_rank(search_vector, query) as rank +from articles, to_tsquery('english', 'postgresql') query +where search_vector @@ query +order by rank desc; +``` + +Search multiple terms: + +```sql +-- AND: both terms required +to_tsquery('postgresql & performance') + +-- OR: either term +to_tsquery('postgresql | mysql') + +-- Prefix matching +to_tsquery('post:*') +``` + +Reference: [Full Text Search](https://supabase.com/docs/guides/database/full-text-search) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md new file mode 100644 index 000000000..e3d261ea1 --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/advanced-jsonb-indexing.md @@ -0,0 +1,49 @@ +--- +title: Index JSONB Columns for Efficient Querying +impact: MEDIUM +impactDescription: 10-100x faster JSONB queries with 
proper indexing +tags: jsonb, gin, indexes, json +--- + +## Index JSONB Columns for Efficient Querying + +JSONB queries without indexes scan the entire table. Use GIN indexes for containment queries. + +**Incorrect (no index on JSONB):** + +```sql +create table products ( + id bigint primary key, + attributes jsonb +); + +-- Full table scan for every query +select * from products where attributes @> '{"color": "red"}'; +select * from products where attributes->>'brand' = 'Nike'; +``` + +**Correct (GIN index for JSONB):** + +```sql +-- GIN index for containment operators (@>, ?, ?&, ?|) +create index products_attrs_gin on products using gin (attributes); + +-- Now containment queries use the index +select * from products where attributes @> '{"color": "red"}'; + +-- For specific key lookups, use expression index +create index products_brand_idx on products ((attributes->>'brand')); +select * from products where attributes->>'brand' = 'Nike'; +``` + +Choose the right operator class: + +```sql +-- jsonb_ops (default): supports all operators, larger index +create index idx1 on products using gin (attributes); + +-- jsonb_path_ops: only @> operator, but 2-3x smaller index +create index idx2 on products using gin (attributes jsonb_path_ops); +``` + +Reference: [JSONB Indexes](https://www.postgresql.org/docs/current/datatype-json.html#JSON-INDEXING) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md new file mode 100644 index 000000000..40b9cc50d --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/conn-idle-timeout.md @@ -0,0 +1,46 @@ +--- +title: Configure Idle Connection Timeouts +impact: HIGH +impactDescription: Reclaim 30-50% of connection slots from idle clients +tags: connections, timeout, idle, resource-management +--- + +## Configure Idle Connection Timeouts + +Idle connections waste resources. 
Configure timeouts to automatically reclaim them. + +**Incorrect (connections held indefinitely):** + +```sql +-- No timeout configured +show idle_in_transaction_session_timeout; -- 0 (disabled) + +-- Connections stay open forever, even when idle +select pid, state, state_change, query +from pg_stat_activity +where state = 'idle in transaction'; +-- Shows transactions idle for hours, holding locks +``` + +**Correct (automatic cleanup of idle connections):** + +```sql +-- Terminate connections idle in transaction after 30 seconds +alter system set idle_in_transaction_session_timeout = '30s'; + +-- Terminate completely idle connections after 10 minutes +alter system set idle_session_timeout = '10min'; + +-- Reload configuration +select pg_reload_conf(); +``` + +For pooled connections, configure at the pooler level: + +```ini +# pgbouncer.ini +server_idle_timeout = 60 +client_idle_timeout = 300 +``` + +Reference: [Connection Timeouts](https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-IDLE-IN-TRANSACTION-SESSION-TIMEOUT) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/conn-limits.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/conn-limits.md new file mode 100644 index 000000000..cb3e400c0 --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/conn-limits.md @@ -0,0 +1,44 @@ +--- +title: Set Appropriate Connection Limits +impact: CRITICAL +impactDescription: Prevent database crashes and memory exhaustion +tags: connections, max-connections, limits, stability +--- + +## Set Appropriate Connection Limits + +Too many connections exhaust memory and degrade performance. Set limits based on available resources. 
+ +**Incorrect (unlimited or excessive connections):** + +```sql +-- Default max_connections = 100, but often increased blindly +show max_connections; -- 500 (way too high for 4GB RAM) + +-- Each connection uses 1-3MB RAM +-- 500 connections * 2MB = 1GB just for connections! +-- Out of memory errors under load +``` + +**Correct (calculate based on resources):** + +```sql +-- Formula: max_connections = (RAM in MB / 5MB per connection) - reserved +-- For 4GB RAM: (4096 / 5) - 10 = ~800 theoretical max +-- But practically, 100-200 is better for query performance + +-- Recommended settings for 4GB RAM +alter system set max_connections = 100; + +-- Also set work_mem appropriately +-- work_mem * max_connections should not exceed 25% of RAM +alter system set work_mem = '8MB'; -- 8MB * 100 = 800MB max +``` + +Monitor connection usage: + +```sql +select count(*), state from pg_stat_activity group by state; +``` + +Reference: [Database Connections](https://supabase.com/docs/guides/platform/performance#connection-management) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/conn-pooling.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/conn-pooling.md new file mode 100644 index 000000000..e2ebd581d --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/conn-pooling.md @@ -0,0 +1,41 @@ +--- +title: Use Connection Pooling for All Applications +impact: CRITICAL +impactDescription: Handle 10-100x more concurrent users +tags: connection-pooling, pgbouncer, performance, scalability +--- + +## Use Connection Pooling for All Applications + +Postgres connections are expensive (1-3MB RAM each). Without pooling, applications exhaust connections under load. 
+ +**Incorrect (new connection per request):** + +```sql +-- Each request creates a new connection +-- Application code: db.connect() per request +-- Result: 500 concurrent users = 500 connections = crashed database + +-- Check current connections +select count(*) from pg_stat_activity; -- 487 connections! +``` + +**Correct (connection pooling):** + +```sql +-- Use a pooler like PgBouncer between app and database +-- Application connects to pooler, pooler reuses a small pool to Postgres + +-- Configure pool_size based on: (CPU cores * 2) + spindle_count +-- Example for 4 cores: pool_size = 10 + +-- Result: 500 concurrent users share 10 actual connections +select count(*) from pg_stat_activity; -- 10 connections +``` + +Pool modes: + +- **Transaction mode**: connection returned after each transaction (best for most apps) +- **Session mode**: connection held for entire session (needed for prepared statements, temp tables) + +Reference: [Connection Pooling](https://supabase.com/docs/guides/database/connecting-to-postgres#connection-pooler) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md new file mode 100644 index 000000000..555547d83 --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/conn-prepared-statements.md @@ -0,0 +1,46 @@ +--- +title: Use Prepared Statements Correctly with Pooling +impact: HIGH +impactDescription: Avoid prepared statement conflicts in pooled environments +tags: prepared-statements, connection-pooling, transaction-mode +--- + +## Use Prepared Statements Correctly with Pooling + +Prepared statements are tied to individual database connections. In transaction-mode pooling, connections are shared, causing conflicts. 
+ +**Incorrect (named prepared statements with transaction pooling):** + +```sql +-- Named prepared statement +prepare get_user as select * from users where id = $1; + +-- In transaction mode pooling, next request may get different connection +execute get_user(123); +-- ERROR: prepared statement "get_user" does not exist +``` + +**Correct (use unnamed statements or session mode):** + +```sql +-- Option 1: Use unnamed prepared statements (most ORMs do this automatically) +-- The query is prepared and executed in a single protocol message + +-- Option 2: Deallocate after use in transaction mode +prepare get_user as select * from users where id = $1; +execute get_user(123); +deallocate get_user; + +-- Option 3: Use session mode pooling (port 5432 vs 6543) +-- Connection is held for entire session, prepared statements persist +``` + +Check your driver settings: + +```sql +-- Many drivers use prepared statements by default +-- Node.js pg: { prepare: false } to disable +-- JDBC: prepareThreshold=0 to disable +``` + +Reference: [Prepared Statements with Pooling](https://supabase.com/docs/guides/database/connecting-to-postgres#connection-pool-modes) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/data-batch-inserts.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/data-batch-inserts.md new file mode 100644 index 000000000..997947cb1 --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/data-batch-inserts.md @@ -0,0 +1,54 @@ +--- +title: Batch INSERT Statements for Bulk Data +impact: MEDIUM +impactDescription: 10-50x faster bulk inserts +tags: batch, insert, bulk, performance, copy +--- + +## Batch INSERT Statements for Bulk Data + +Individual INSERT statements have high overhead. Batch multiple rows in single statements or use COPY. 
+ +**Incorrect (individual inserts):** + +```sql +-- Each insert is a separate transaction and round trip +insert into events (user_id, action) values (1, 'click'); +insert into events (user_id, action) values (1, 'view'); +insert into events (user_id, action) values (2, 'click'); +-- ... 1000 more individual inserts + +-- 1000 inserts = 1000 round trips = slow +``` + +**Correct (batch insert):** + +```sql +-- Multiple rows in single statement +insert into events (user_id, action) values + (1, 'click'), + (1, 'view'), + (2, 'click'), + -- ... up to ~1000 rows per batch + (999, 'view'); + +-- One round trip for 1000 rows +``` + +For large imports, use COPY: + +```sql +-- COPY is fastest for bulk loading +copy events (user_id, action, created_at) +from '/path/to/data.csv' +with (format csv, header true); + +-- Or from stdin in application +copy events (user_id, action) from stdin with (format csv); +1,click +1,view +2,click +\. +``` + +Reference: [COPY](https://www.postgresql.org/docs/current/sql-copy.html) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/data-n-plus-one.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/data-n-plus-one.md new file mode 100644 index 000000000..2109186ff --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/data-n-plus-one.md @@ -0,0 +1,53 @@ +--- +title: Eliminate N+1 Queries with Batch Loading +impact: MEDIUM-HIGH +impactDescription: 10-100x fewer database round trips +tags: n-plus-one, batch, performance, queries +--- + +## Eliminate N+1 Queries with Batch Loading + +N+1 queries execute one query per item in a loop. Batch them into a single query using arrays or JOINs. 
+ +**Incorrect (N+1 queries):** + +```sql +-- First query: get all users +select id from users where active = true; -- Returns 100 IDs + +-- Then N queries, one per user +select * from orders where user_id = 1; +select * from orders where user_id = 2; +select * from orders where user_id = 3; +-- ... 97 more queries! + +-- Total: 101 round trips to database +``` + +**Correct (single batch query):** + +```sql +-- Collect IDs and query once with ANY +select * from orders where user_id = any(array[1, 2, 3, ...]); + +-- Or use JOIN instead of loop +select u.id, u.name, o.* +from users u +left join orders o on o.user_id = u.id +where u.active = true; + +-- Total: 1 round trip +``` + +Application pattern: + +```sql +-- Instead of looping in application code: +-- for user in users: db.query("SELECT * FROM orders WHERE user_id = $1", user.id) + +-- Pass array parameter: +select * from orders where user_id = any($1::bigint[]); +-- Application passes: [1, 2, 3, 4, 5, ...] +``` + +Reference: [N+1 Query Problem](https://supabase.com/docs/guides/database/query-optimization) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/data-pagination.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/data-pagination.md new file mode 100644 index 000000000..633d83932 --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/data-pagination.md @@ -0,0 +1,50 @@ +--- +title: Use Cursor-Based Pagination Instead of OFFSET +impact: MEDIUM-HIGH +impactDescription: Consistent O(1) performance regardless of page depth +tags: pagination, cursor, keyset, offset, performance +--- + +## Use Cursor-Based Pagination Instead of OFFSET + +OFFSET-based pagination scans all skipped rows, getting slower on deeper pages. Cursor pagination is O(1). 
+ +**Incorrect (OFFSET pagination):** + +```sql +-- Page 1: scans 20 rows +select * from products order by id limit 20 offset 0; + +-- Page 100: scans 2000 rows to skip 1980 +select * from products order by id limit 20 offset 1980; + +-- Page 10000: scans 200,000 rows! +select * from products order by id limit 20 offset 199980; +``` + +**Correct (cursor/keyset pagination):** + +```sql +-- Page 1: get first 20 +select * from products order by id limit 20; +-- Application stores last_id = 20 + +-- Page 2: start after last ID +select * from products where id > 20 order by id limit 20; +-- Uses index, always fast regardless of page depth + +-- Page 10000: same speed as page 1 +select * from products where id > 199980 order by id limit 20; +``` + +For multi-column sorting: + +```sql +-- Cursor must include all sort columns +select * from products +where (created_at, id) > ('2024-01-15 10:00:00', 12345) +order by created_at, id +limit 20; +``` + +Reference: [Pagination](https://supabase.com/docs/guides/database/pagination) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/data-upsert.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/data-upsert.md new file mode 100644 index 000000000..bc95e2305 --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/data-upsert.md @@ -0,0 +1,50 @@ +--- +title: Use UPSERT for Insert-or-Update Operations +impact: MEDIUM +impactDescription: Atomic operation, eliminates race conditions +tags: upsert, on-conflict, insert, update +--- + +## Use UPSERT for Insert-or-Update Operations + +Using separate SELECT-then-INSERT/UPDATE creates race conditions. Use INSERT ... ON CONFLICT for atomic upserts. 
+ +**Incorrect (check-then-insert race condition):** + +```sql +-- Race condition: two requests check simultaneously +select * from settings where user_id = 123 and key = 'theme'; +-- Both find nothing + +-- Both try to insert +insert into settings (user_id, key, value) values (123, 'theme', 'dark'); +-- One succeeds, one fails with duplicate key error! +``` + +**Correct (atomic UPSERT):** + +```sql +-- Single atomic operation +insert into settings (user_id, key, value) +values (123, 'theme', 'dark') +on conflict (user_id, key) +do update set value = excluded.value, updated_at = now(); + +-- Returns the inserted/updated row +insert into settings (user_id, key, value) +values (123, 'theme', 'dark') +on conflict (user_id, key) +do update set value = excluded.value +returning *; +``` + +Insert-or-ignore pattern: + +```sql +-- Insert only if not exists (no update) +insert into page_views (page_id, user_id) +values (1, 123) +on conflict (page_id, user_id) do nothing; +``` + +Reference: [INSERT ON CONFLICT](https://www.postgresql.org/docs/current/sql-insert.html#SQL-ON-CONFLICT) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/lock-advisory.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/lock-advisory.md new file mode 100644 index 000000000..572eaf0dc --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/lock-advisory.md @@ -0,0 +1,56 @@ +--- +title: Use Advisory Locks for Application-Level Locking +impact: MEDIUM +impactDescription: Efficient coordination without row-level lock overhead +tags: advisory-locks, coordination, application-locks +--- + +## Use Advisory Locks for Application-Level Locking + +Advisory locks provide application-level coordination without requiring database rows to lock. 
+ +**Incorrect (creating rows just for locking):** + +```sql +-- Creating dummy rows to lock on +create table resource_locks ( + resource_name text primary key +); + +insert into resource_locks values ('report_generator'); + +-- Lock by selecting the row +select * from resource_locks where resource_name = 'report_generator' for update; +``` + +**Correct (advisory locks):** + +```sql +-- Session-level advisory lock (released on disconnect or unlock) +select pg_advisory_lock(hashtext('report_generator')); +-- ... do exclusive work ... +select pg_advisory_unlock(hashtext('report_generator')); + +-- Transaction-level lock (released on commit/rollback) +begin; +select pg_advisory_xact_lock(hashtext('daily_report')); +-- ... do work ... +commit; -- Lock automatically released +``` + +Try-lock for non-blocking operations: + +```sql +-- Returns immediately with true/false instead of waiting +select pg_try_advisory_lock(hashtext('resource_name')); + +-- Use in application +if (acquired) { + -- Do work + select pg_advisory_unlock(hashtext('resource_name')); +} else { + -- Skip or retry later +} +``` + +Reference: [Advisory Locks](https://www.postgresql.org/docs/current/explicit-locking.html#ADVISORY-LOCKS) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md new file mode 100644 index 000000000..974da5ede --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/lock-deadlock-prevention.md @@ -0,0 +1,68 @@ +--- +title: Prevent Deadlocks with Consistent Lock Ordering +impact: MEDIUM-HIGH +impactDescription: Eliminate deadlock errors, improve reliability +tags: deadlocks, locking, transactions, ordering +--- + +## Prevent Deadlocks with Consistent Lock Ordering + +Deadlocks occur when transactions lock resources in different orders. Always +acquire locks in a consistent order. 
+ +**Incorrect (inconsistent lock ordering):** + +```sql +-- Transaction A -- Transaction B +begin; begin; +update accounts update accounts +set balance = balance - 100 set balance = balance - 50 +where id = 1; where id = 2; -- B locks row 2 + +update accounts update accounts +set balance = balance + 100 set balance = balance + 50 +where id = 2; -- A waits for B where id = 1; -- B waits for A + +-- DEADLOCK! Both waiting for each other +``` + +**Correct (lock rows in consistent order first):** + +```sql +-- Explicitly acquire locks in ID order before updating +begin; +select * from accounts where id in (1, 2) order by id for update; + +-- Now perform updates in any order - locks already held +update accounts set balance = balance - 100 where id = 1; +update accounts set balance = balance + 100 where id = 2; +commit; +``` + +Alternative: use a single statement to update atomically: + +```sql +-- Single statement acquires all locks atomically +begin; +update accounts +set balance = balance + case id + when 1 then -100 + when 2 then 100 +end +where id in (1, 2); +commit; +``` + +Detect deadlocks in logs: + +```sql +-- Check for recent deadlocks +select * from pg_stat_database where deadlocks > 0; + +-- Enable deadlock logging +set log_lock_waits = on; +set deadlock_timeout = '1s'; +``` + +Reference: +[Deadlocks](https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-DEADLOCKS) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/lock-short-transactions.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/lock-short-transactions.md new file mode 100644 index 000000000..e6b8ef263 --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/lock-short-transactions.md @@ -0,0 +1,50 @@ +--- +title: Keep Transactions Short to Reduce Lock Contention +impact: MEDIUM-HIGH +impactDescription: 3-5x throughput improvement, fewer deadlocks +tags: transactions, locking, contention, performance +--- 
+ +## Keep Transactions Short to Reduce Lock Contention + +Long-running transactions hold locks that block other queries. Keep transactions as short as possible. + +**Incorrect (long transaction with external calls):** + +```sql +begin; +select * from orders where id = 1 for update; -- Lock acquired + +-- Application makes HTTP call to payment API (2-5 seconds) +-- Other queries on this row are blocked! + +update orders set status = 'paid' where id = 1; +commit; -- Lock held for entire duration +``` + +**Correct (minimal transaction scope):** + +```sql +-- Validate data and call APIs outside transaction +-- Application: response = await paymentAPI.charge(...) + +-- Only hold lock for the actual update +begin; +update orders +set status = 'paid', payment_id = $1 +where id = $2 and status = 'pending' +returning *; +commit; -- Lock held for milliseconds +``` + +Use `statement_timeout` to prevent runaway transactions: + +```sql +-- Abort queries running longer than 30 seconds +set statement_timeout = '30s'; + +-- Or per-session +set local statement_timeout = '5s'; +``` + +Reference: [Transaction Management](https://www.postgresql.org/docs/current/tutorial-transactions.html) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/lock-skip-locked.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/lock-skip-locked.md new file mode 100644 index 000000000..77bdbb970 --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/lock-skip-locked.md @@ -0,0 +1,54 @@ +--- +title: Use SKIP LOCKED for Non-Blocking Queue Processing +impact: MEDIUM-HIGH +impactDescription: 10x throughput for worker queues +tags: skip-locked, queue, workers, concurrency +--- + +## Use SKIP LOCKED for Non-Blocking Queue Processing + +When multiple workers process a queue, SKIP LOCKED allows workers to process different rows without waiting. 
+ +**Incorrect (workers block each other):** + +```sql +-- Worker 1 and Worker 2 both try to get next job +begin; +select * from jobs where status = 'pending' order by created_at limit 1 for update; +-- Worker 2 waits for Worker 1's lock to release! +``` + +**Correct (SKIP LOCKED for parallel processing):** + +```sql +-- Each worker skips locked rows and gets the next available +begin; +select * from jobs +where status = 'pending' +order by created_at +limit 1 +for update skip locked; + +-- Worker 1 gets job 1, Worker 2 gets job 2 (no waiting) + +update jobs set status = 'processing' where id = $1; +commit; +``` + +Complete queue pattern: + +```sql +-- Atomic claim-and-update in one statement +update jobs +set status = 'processing', worker_id = $1, started_at = now() +where id = ( + select id from jobs + where status = 'pending' + order by created_at + limit 1 + for update skip locked +) +returning *; +``` + +Reference: [SELECT FOR UPDATE SKIP LOCKED](https://www.postgresql.org/docs/current/sql-select.html#SQL-FOR-UPDATE-SHARE) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md new file mode 100644 index 000000000..542978c3b --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/monitor-explain-analyze.md @@ -0,0 +1,45 @@ +--- +title: Use EXPLAIN ANALYZE to Diagnose Slow Queries +impact: LOW-MEDIUM +impactDescription: Identify exact bottlenecks in query execution +tags: explain, analyze, diagnostics, query-plan +--- + +## Use EXPLAIN ANALYZE to Diagnose Slow Queries + +EXPLAIN ANALYZE executes the query and shows actual timings, revealing the true performance bottlenecks. + +**Incorrect (guessing at performance issues):** + +```sql +-- Query is slow, but why? +select * from orders where customer_id = 123 and status = 'pending'; +-- "It must be missing an index" - but which one? 
+``` + +**Correct (use EXPLAIN ANALYZE):** + +```sql +explain (analyze, buffers, format text) +select * from orders where customer_id = 123 and status = 'pending'; + +-- Output reveals the issue: +-- Seq Scan on orders (cost=0.00..25000.00 rows=50 width=100) (actual time=0.015..450.123 rows=50 loops=1) +-- Filter: ((customer_id = 123) AND (status = 'pending'::text)) +-- Rows Removed by Filter: 999950 +-- Buffers: shared hit=5000 read=15000 +-- Planning Time: 0.150 ms +-- Execution Time: 450.500 ms +``` + +Key things to look for: + +```sql +-- Seq Scan on large tables = missing index +-- Rows Removed by Filter = poor selectivity or missing index +-- Buffers: read >> hit = data not cached, needs more memory +-- Nested Loop with high loops = consider different join strategy +-- Sort Method: external merge = work_mem too low +``` + +Reference: [EXPLAIN](https://supabase.com/docs/guides/database/inspect) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md new file mode 100644 index 000000000..d7e82f1aa --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/monitor-pg-stat-statements.md @@ -0,0 +1,55 @@ +--- +title: Enable pg_stat_statements for Query Analysis +impact: LOW-MEDIUM +impactDescription: Identify top resource-consuming queries +tags: pg-stat-statements, monitoring, statistics, performance +--- + +## Enable pg_stat_statements for Query Analysis + +pg_stat_statements tracks execution statistics for all queries, helping identify slow and frequent queries. + +**Incorrect (no visibility into query patterns):** + +```sql +-- Database is slow, but which queries are the problem? 
+-- No way to know without pg_stat_statements +``` + +**Correct (enable and query pg_stat_statements):** + +```sql +-- Enable the extension +create extension if not exists pg_stat_statements; + +-- Find slowest queries by total time +select + calls, + round(total_exec_time::numeric, 2) as total_time_ms, + round(mean_exec_time::numeric, 2) as mean_time_ms, + query +from pg_stat_statements +order by total_exec_time desc +limit 10; + +-- Find most frequent queries +select calls, query +from pg_stat_statements +order by calls desc +limit 10; + +-- Reset statistics after optimization +select pg_stat_statements_reset(); +``` + +Key metrics to monitor: + +```sql +-- Queries with high mean time (candidates for optimization) +select query, mean_exec_time, calls +from pg_stat_statements +where mean_exec_time > 100 -- > 100ms average +order by mean_exec_time desc; +``` + +Reference: [pg_stat_statements](https://supabase.com/docs/guides/database/extensions/pg_stat_statements) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md new file mode 100644 index 000000000..e0e8ea0b7 --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/monitor-vacuum-analyze.md @@ -0,0 +1,55 @@ +--- +title: Maintain Table Statistics with VACUUM and ANALYZE +impact: MEDIUM +impactDescription: 2-10x better query plans with accurate statistics +tags: vacuum, analyze, statistics, maintenance, autovacuum +--- + +## Maintain Table Statistics with VACUUM and ANALYZE + +Outdated statistics cause the query planner to make poor decisions. VACUUM reclaims space, ANALYZE updates statistics. 
+ +**Incorrect (stale statistics):** + +```sql +-- Table has 1M rows but stats say 1000 +-- Query planner chooses wrong strategy +explain select * from orders where status = 'pending'; +-- Shows: Seq Scan (because stats show small table) +-- Actually: Index Scan would be much faster +``` + +**Correct (maintain fresh statistics):** + +```sql +-- Manually analyze after large data changes +analyze orders; + +-- Analyze specific columns used in WHERE clauses +analyze orders (status, created_at); + +-- Check when tables were last analyzed +select + relname, + last_vacuum, + last_autovacuum, + last_analyze, + last_autoanalyze +from pg_stat_user_tables +order by last_analyze nulls first; +``` + +Autovacuum tuning for busy tables: + +```sql +-- Increase frequency for high-churn tables +alter table orders set ( + autovacuum_vacuum_scale_factor = 0.05, -- Vacuum at 5% dead tuples (default 20%) + autovacuum_analyze_scale_factor = 0.02 -- Analyze at 2% changes (default 10%) +); + +-- Check autovacuum status +select * from pg_stat_progress_vacuum; +``` + +Reference: [VACUUM](https://supabase.com/docs/guides/database/database-size#vacuum-operations) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/query-composite-indexes.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/query-composite-indexes.md new file mode 100644 index 000000000..fea64523f --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/query-composite-indexes.md @@ -0,0 +1,44 @@ +--- +title: Create Composite Indexes for Multi-Column Queries +impact: HIGH +impactDescription: 5-10x faster multi-column queries +tags: indexes, composite-index, multi-column, query-optimization +--- + +## Create Composite Indexes for Multi-Column Queries + +When queries filter on multiple columns, a composite index is more efficient than separate single-column indexes. 
+ +**Incorrect (separate indexes require bitmap scan):** + +```sql +-- Two separate indexes +create index orders_status_idx on orders (status); +create index orders_created_idx on orders (created_at); + +-- Query must combine both indexes (slower) +select * from orders where status = 'pending' and created_at > '2024-01-01'; +``` + +**Correct (composite index):** + +```sql +-- Single composite index (leftmost column first for equality checks) +create index orders_status_created_idx on orders (status, created_at); + +-- Query uses one efficient index scan +select * from orders where status = 'pending' and created_at > '2024-01-01'; +``` + +**Column order matters** - place equality columns first, range columns last: + +```sql +-- Good: status (=) before created_at (>) +create index idx on orders (status, created_at); + +-- Works for: WHERE status = 'pending' +-- Works for: WHERE status = 'pending' AND created_at > '2024-01-01' +-- Does NOT work for: WHERE created_at > '2024-01-01' (leftmost prefix rule) +``` + +Reference: [Multicolumn Indexes](https://www.postgresql.org/docs/current/indexes-multicolumn.html) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/query-covering-indexes.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/query-covering-indexes.md new file mode 100644 index 000000000..9d2a4947b --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/query-covering-indexes.md @@ -0,0 +1,40 @@ +--- +title: Use Covering Indexes to Avoid Table Lookups +impact: MEDIUM-HIGH +impactDescription: 2-5x faster queries by eliminating heap fetches +tags: indexes, covering-index, include, index-only-scan +--- + +## Use Covering Indexes to Avoid Table Lookups + +Covering indexes include all columns needed by a query, enabling index-only scans that skip the table entirely. 
+ +**Incorrect (index scan + heap fetch):** + +```sql +create index users_email_idx on users (email); + +-- Must fetch name and created_at from table heap +select email, name, created_at from users where email = 'user@example.com'; +``` + +**Correct (index-only scan with INCLUDE):** + +```sql +-- Include non-searchable columns in the index +create index users_email_idx on users (email) include (name, created_at); + +-- All columns served from index, no table access needed +select email, name, created_at from users where email = 'user@example.com'; +``` + +Use INCLUDE for columns you SELECT but don't filter on: + +```sql +-- Searching by status, but also need customer_id and total +create index orders_status_idx on orders (status) include (customer_id, total); + +select status, customer_id, total from orders where status = 'shipped'; +``` + +Reference: [Index-Only Scans](https://www.postgresql.org/docs/current/indexes-index-only-scans.html) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/query-index-types.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/query-index-types.md new file mode 100644 index 000000000..93b32590f --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/query-index-types.md @@ -0,0 +1,48 @@ +--- +title: Choose the Right Index Type for Your Data +impact: HIGH +impactDescription: 10-100x improvement with correct index type +tags: indexes, btree, gin, gist, brin, hash, index-types +--- + +## Choose the Right Index Type for Your Data + +Different index types excel at different query patterns. The default B-tree isn't always optimal. 
+ +**Incorrect (B-tree for JSONB containment):** + +```sql +-- B-tree cannot optimize containment operators +create index products_attrs_idx on products (attributes); +select * from products where attributes @> '{"color": "red"}'; +-- Full table scan - B-tree doesn't support @> operator +``` + +**Correct (GIN for JSONB):** + +```sql +-- GIN supports @>, ?, ?&, ?| operators +create index products_attrs_idx on products using gin (attributes); +select * from products where attributes @> '{"color": "red"}'; +``` + +Index type guide: + +```sql +-- B-tree (default): =, <, >, BETWEEN, IN, IS NULL +create index users_created_idx on users (created_at); + +-- GIN: arrays, JSONB, full-text search +create index posts_tags_idx on posts using gin (tags); + +-- GiST: geometric data, range types, nearest-neighbor (KNN) queries +create index locations_idx on places using gist (location); + +-- BRIN: large time-series tables (10-100x smaller) +create index events_time_idx on events using brin (created_at); + +-- Hash: equality-only (slightly faster than B-tree for =) +create index sessions_token_idx on sessions using hash (token); +``` + +Reference: [Index Types](https://www.postgresql.org/docs/current/indexes-types.html) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/query-missing-indexes.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/query-missing-indexes.md new file mode 100644 index 000000000..e6daace7d --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/query-missing-indexes.md @@ -0,0 +1,43 @@ +--- +title: Add Indexes on WHERE and JOIN Columns +impact: CRITICAL +impactDescription: 100-1000x faster queries on large tables +tags: indexes, performance, sequential-scan, query-optimization +--- + +## Add Indexes on WHERE and JOIN Columns + +Queries filtering or joining on unindexed columns cause full table scans, which become exponentially slower as tables grow. 
+ +**Incorrect (sequential scan on large table):** + +```sql +-- No index on customer_id causes full table scan +select * from orders where customer_id = 123; + +-- EXPLAIN shows: Seq Scan on orders (cost=0.00..25000.00 rows=100 width=85) +``` + +**Correct (index scan):** + +```sql +-- Create index on frequently filtered column +create index orders_customer_id_idx on orders (customer_id); + +select * from orders where customer_id = 123; + +-- EXPLAIN shows: Index Scan using orders_customer_id_idx (cost=0.42..8.44 rows=100 width=85) +``` + +For JOIN columns, always index the foreign key side: + +```sql +-- Index the referencing column +create index orders_customer_id_idx on orders (customer_id); + +select c.name, o.total +from customers c +join orders o on o.customer_id = c.id; +``` + +Reference: [Query Optimization](https://supabase.com/docs/guides/database/query-optimization) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/query-partial-indexes.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/query-partial-indexes.md new file mode 100644 index 000000000..3e61a3417 --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/query-partial-indexes.md @@ -0,0 +1,45 @@ +--- +title: Use Partial Indexes for Filtered Queries +impact: HIGH +impactDescription: 5-20x smaller indexes, faster writes and queries +tags: indexes, partial-index, query-optimization, storage +--- + +## Use Partial Indexes for Filtered Queries + +Partial indexes only include rows matching a WHERE condition, making them smaller and faster when queries consistently filter on the same condition. 
+ +**Incorrect (full index includes irrelevant rows):** + +```sql +-- Index includes all rows, even soft-deleted ones +create index users_email_idx on users (email); + +-- Query always filters active users +select * from users where email = 'user@example.com' and deleted_at is null; +``` + +**Correct (partial index matches query filter):** + +```sql +-- Index only includes active users +create index users_active_email_idx on users (email) +where deleted_at is null; + +-- Query uses the smaller, faster index +select * from users where email = 'user@example.com' and deleted_at is null; +``` + +Common use cases for partial indexes: + +```sql +-- Only pending orders (status rarely changes once completed) +create index orders_pending_idx on orders (created_at) +where status = 'pending'; + +-- Only non-null values +create index products_sku_idx on products (sku) +where sku is not null; +``` + +Reference: [Partial Indexes](https://www.postgresql.org/docs/current/indexes-partial.html) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/schema-constraints.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/schema-constraints.md new file mode 100644 index 000000000..1d2ef8f9a --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/schema-constraints.md @@ -0,0 +1,80 @@ +--- +title: Add Constraints Safely in Migrations +impact: HIGH +impactDescription: Prevents migration failures and enables idempotent schema changes +tags: constraints, migrations, schema, alter-table +--- + +## Add Constraints Safely in Migrations + +PostgreSQL does not support `ADD CONSTRAINT IF NOT EXISTS`. Migrations using this syntax will fail. 
+ +**Incorrect (causes syntax error):** + +```sql +-- ERROR: syntax error at or near "not" (SQLSTATE 42601) +alter table public.profiles +add constraint if not exists profiles_birthchart_id_unique unique (birthchart_id); +``` + +**Correct (idempotent constraint creation):** + +```sql +-- Use DO block to check before adding +do $$ +begin + if not exists ( + select 1 from pg_constraint + where conname = 'profiles_birthchart_id_unique' + and conrelid = 'public.profiles'::regclass + ) then + alter table public.profiles + add constraint profiles_birthchart_id_unique unique (birthchart_id); + end if; +end $$; +``` + +For all constraint types: + +```sql +-- Check constraints +do $$ +begin + if not exists ( + select 1 from pg_constraint + where conname = 'check_age_positive' + ) then + alter table users add constraint check_age_positive check (age > 0); + end if; +end $$; + +-- Foreign keys +do $$ +begin + if not exists ( + select 1 from pg_constraint + where conname = 'profiles_birthchart_id_fkey' + ) then + alter table profiles + add constraint profiles_birthchart_id_fkey + foreign key (birthchart_id) references birthcharts(id); + end if; +end $$; +``` + +Check if constraint exists: + +```sql +-- Query to check constraint existence +select conname, contype, pg_get_constraintdef(oid) +from pg_constraint +where conrelid = 'public.profiles'::regclass; + +-- contype values: +-- 'p' = PRIMARY KEY +-- 'f' = FOREIGN KEY +-- 'u' = UNIQUE +-- 'c' = CHECK +``` + +Reference: [Constraints](https://www.postgresql.org/docs/current/ddl-constraints.html) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/schema-data-types.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/schema-data-types.md new file mode 100644 index 000000000..f253a5819 --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/schema-data-types.md @@ -0,0 +1,46 @@ +--- +title: Choose Appropriate Data Types +impact: HIGH +impactDescription: 
50% storage reduction, faster comparisons +tags: data-types, schema, storage, performance +--- + +## Choose Appropriate Data Types + +Using the right data types reduces storage, improves query performance, and prevents bugs. + +**Incorrect (wrong data types):** + +```sql +create table users ( + id int, -- Will overflow at 2.1 billion + email varchar(255), -- Unnecessary length limit + created_at timestamp, -- Missing timezone info + is_active varchar(5), -- String for boolean + price varchar(20) -- String for numeric +); +``` + +**Correct (appropriate data types):** + +```sql +create table users ( + id bigint generated always as identity primary key, -- 9 quintillion max + email text, -- No artificial limit, same performance as varchar + created_at timestamptz, -- Always store timezone-aware timestamps + is_active boolean default true, -- 1 byte vs variable string length + price numeric(10,2) -- Exact decimal arithmetic +); +``` + +Key guidelines: + +```sql +-- IDs: use bigint, not int (future-proofing) +-- Strings: use text, not varchar(n) unless constraint needed +-- Time: use timestamptz, not timestamp +-- Money: use numeric, not float (precision matters) +-- Enums: use text with check constraint or create enum type +``` + +Reference: [Data Types](https://www.postgresql.org/docs/current/datatype.html) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md new file mode 100644 index 000000000..6c3d6ff62 --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/schema-foreign-key-indexes.md @@ -0,0 +1,59 @@ +--- +title: Index Foreign Key Columns +impact: HIGH +impactDescription: 10-100x faster JOINs and CASCADE operations +tags: foreign-key, indexes, joins, schema +--- + +## Index Foreign Key Columns + +Postgres does not automatically index foreign key columns. 
Missing indexes cause slow JOINs and CASCADE operations. + +**Incorrect (unindexed foreign key):** + +```sql +create table orders ( + id bigint generated always as identity primary key, + customer_id bigint references customers(id) on delete cascade, + total numeric(10,2) +); + +-- No index on customer_id! +-- JOINs and ON DELETE CASCADE both require full table scan +select * from orders where customer_id = 123; -- Seq Scan +delete from customers where id = 123; -- Locks table, scans all orders +``` + +**Correct (indexed foreign key):** + +```sql +create table orders ( + id bigint generated always as identity primary key, + customer_id bigint references customers(id) on delete cascade, + total numeric(10,2) +); + +-- Always index the FK column +create index orders_customer_id_idx on orders (customer_id); + +-- Now JOINs and cascades are fast +select * from orders where customer_id = 123; -- Index Scan +delete from customers where id = 123; -- Uses index, fast cascade +``` + +Find missing FK indexes: + +```sql +select + conrelid::regclass as table_name, + a.attname as fk_column +from pg_constraint c +join pg_attribute a on a.attrelid = c.conrelid and a.attnum = any(c.conkey) +where c.contype = 'f' + and not exists ( + select 1 from pg_index i + where i.indrelid = c.conrelid and a.attnum = any(i.indkey) + ); +``` + +Reference: [Foreign Keys](https://www.postgresql.org/docs/current/ddl-constraints.html#DDL-CONSTRAINTS-FK) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md new file mode 100644 index 000000000..f0072940b --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/schema-lowercase-identifiers.md @@ -0,0 +1,55 @@ +--- +title: Use Lowercase Identifiers for Compatibility +impact: MEDIUM +impactDescription: Avoid case-sensitivity bugs with tools, ORMs, and AI assistants 
+tags: naming, identifiers, case-sensitivity, schema, conventions +--- + +## Use Lowercase Identifiers for Compatibility + +PostgreSQL folds unquoted identifiers to lowercase. Quoted mixed-case identifiers require quotes forever and cause issues with tools, ORMs, and AI assistants that may not recognize them. + +**Incorrect (mixed-case identifiers):** + +```sql +-- Quoted identifiers preserve case but require quotes everywhere +CREATE TABLE "Users" ( + "userId" bigint PRIMARY KEY, + "firstName" text, + "lastName" text +); + +-- Must always quote or queries fail +SELECT "firstName" FROM "Users" WHERE "userId" = 1; + +-- This fails - Users becomes users without quotes +SELECT firstName FROM Users; +-- ERROR: relation "users" does not exist +``` + +**Correct (lowercase snake_case):** + +```sql +-- Unquoted lowercase identifiers are portable and tool-friendly +CREATE TABLE users ( + user_id bigint PRIMARY KEY, + first_name text, + last_name text +); + +-- Works without quotes, recognized by all tools +SELECT first_name FROM users WHERE user_id = 1; +``` + +Common sources of mixed-case identifiers: + +```sql +-- ORMs often generate quoted camelCase - configure them to use snake_case +-- Migrations from other databases may preserve original casing +-- Some GUI tools quote identifiers by default - disable this + +-- If stuck with mixed-case, create views as a compatibility layer +CREATE VIEW users AS SELECT "userId" AS user_id, "firstName" AS first_name FROM "Users"; +``` + +Reference: [Identifiers and Key Words](https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/schema-partitioning.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/schema-partitioning.md new file mode 100644 index 000000000..13137a032 --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/schema-partitioning.md @@ -0,0 +1,55 @@ +--- +title: 
Partition Large Tables for Better Performance +impact: MEDIUM-HIGH +impactDescription: 5-20x faster queries and maintenance on large tables +tags: partitioning, large-tables, time-series, performance +--- + +## Partition Large Tables for Better Performance + +Partitioning splits a large table into smaller pieces, improving query performance and maintenance operations. + +**Incorrect (single large table):** + +```sql +create table events ( + id bigint generated always as identity, + created_at timestamptz, + data jsonb +); + +-- 500M rows, queries scan everything +select * from events where created_at > '2024-01-01'; -- Slow +vacuum events; -- Takes hours, locks table +``` + +**Correct (partitioned by time range):** + +```sql +create table events ( + id bigint generated always as identity, + created_at timestamptz not null, + data jsonb +) partition by range (created_at); + +-- Create partitions for each month +create table events_2024_01 partition of events + for values from ('2024-01-01') to ('2024-02-01'); + +create table events_2024_02 partition of events + for values from ('2024-02-01') to ('2024-03-01'); + +-- Queries only scan relevant partitions +select * from events where created_at > '2024-01-15'; -- Only scans events_2024_01+ + +-- Drop old data instantly +drop table events_2023_01; -- Instant vs DELETE taking hours +``` + +When to partition: + +- Tables > 100M rows +- Time-series data with date-based queries +- Need to efficiently drop old data + +Reference: [Table Partitioning](https://www.postgresql.org/docs/current/ddl-partitioning.html) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/schema-primary-keys.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/schema-primary-keys.md new file mode 100644 index 000000000..fb0fbb166 --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/schema-primary-keys.md @@ -0,0 +1,61 @@ +--- +title: Select Optimal Primary Key Strategy 
+impact: HIGH
+impactDescription: Better index locality, reduced fragmentation
+tags: primary-key, identity, uuid, serial, schema
+---
+
+## Select Optimal Primary Key Strategy
+
+Primary key choice affects insert performance, index size, and replication
+efficiency.
+
+**Incorrect (problematic PK choices):**
+
+```sql
+-- serial is the legacy approach; identity is the SQL-standard replacement
+create table users (
+  id serial primary key -- Works, but IDENTITY is recommended
+);
+
+-- Random UUIDs (v4) cause index fragmentation
+create table orders (
+  id uuid default gen_random_uuid() primary key -- UUIDv4 = random = scattered inserts
+);
+```
+
+**Correct (optimal PK strategies):**
+
+```sql
+-- Use IDENTITY for sequential IDs (SQL-standard, best for most cases)
+create table users (
+  id bigint generated always as identity primary key
+);
+
+-- For distributed systems needing UUIDs, use UUIDv7 (time-ordered)
+-- Requires pg_uuidv7 extension: create extension pg_uuidv7;
+create table orders (
+  id uuid default uuid_generate_v7() primary key -- Time-ordered, no fragmentation
+);
+
+-- Alternative: time-prefixed IDs for sortable, distributed IDs (no extension needed)
+create table events (
+  id text default concat(
+    to_char(now() at time zone 'utc', 'YYYYMMDDHH24MISSMS'),
+    gen_random_uuid()::text
+  ) primary key
+);
+```
+
+Guidelines:
+
+- Single database: `bigint identity` (sequential, 8 bytes, SQL-standard)
+- Distributed/exposed IDs: UUIDv7 (requires pg_uuidv7) or ULID (time-ordered, no
+  fragmentation)
+- `serial` works but `identity` is SQL-standard and preferred for new
+  applications
+- Avoid random UUIDs (v4) as primary keys on large tables (causes index
+  fragmentation)
+
+Reference:
+[Identity Columns](https://www.postgresql.org/docs/current/sql-createtable.html#SQL-CREATETABLE-PARMS-GENERATED-IDENTITY)
diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/security-privileges.md
b/devcon/.agents/skills/supabase-postgres-best-practices/references/security-privileges.md new file mode 100644 index 000000000..448ec345f --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/security-privileges.md @@ -0,0 +1,54 @@ +--- +title: Apply Principle of Least Privilege +impact: MEDIUM +impactDescription: Reduced attack surface, better audit trail +tags: privileges, security, roles, permissions +--- + +## Apply Principle of Least Privilege + +Grant only the minimum permissions required. Never use superuser for application queries. + +**Incorrect (overly broad permissions):** + +```sql +-- Application uses superuser connection +-- Or grants ALL to application role +grant all privileges on all tables in schema public to app_user; +grant all privileges on all sequences in schema public to app_user; + +-- Any SQL injection becomes catastrophic +-- drop table users; cascades to everything +``` + +**Correct (minimal, specific grants):** + +```sql +-- Create role with no default privileges +create role app_readonly nologin; + +-- Grant only SELECT on specific tables +grant usage on schema public to app_readonly; +grant select on public.products, public.categories to app_readonly; + +-- Create role for writes with limited scope +create role app_writer nologin; +grant usage on schema public to app_writer; +grant select, insert, update on public.orders to app_writer; +grant usage on sequence orders_id_seq to app_writer; +-- No DELETE permission + +-- Login role inherits from these +create role app_user login password 'xxx'; +grant app_writer to app_user; +``` + +Revoke public defaults: + +```sql +-- Revoke default public access +revoke all on schema public from public; +revoke all on all tables in schema public from public; +``` + +Reference: [Roles and Privileges](https://supabase.com/blog/postgres-roles-and-privileges) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/security-rls-basics.md 
b/devcon/.agents/skills/supabase-postgres-best-practices/references/security-rls-basics.md new file mode 100644 index 000000000..c61e1a850 --- /dev/null +++ b/devcon/.agents/skills/supabase-postgres-best-practices/references/security-rls-basics.md @@ -0,0 +1,50 @@ +--- +title: Enable Row Level Security for Multi-Tenant Data +impact: CRITICAL +impactDescription: Database-enforced tenant isolation, prevent data leaks +tags: rls, row-level-security, multi-tenant, security +--- + +## Enable Row Level Security for Multi-Tenant Data + +Row Level Security (RLS) enforces data access at the database level, ensuring users only see their own data. + +**Incorrect (application-level filtering only):** + +```sql +-- Relying only on application to filter +select * from orders where user_id = $current_user_id; + +-- Bug or bypass means all data is exposed! +select * from orders; -- Returns ALL orders +``` + +**Correct (database-enforced RLS):** + +```sql +-- Enable RLS on the table +alter table orders enable row level security; + +-- Create policy for users to see only their orders +create policy orders_user_policy on orders + for all + using (user_id = current_setting('app.current_user_id')::bigint); + +-- Force RLS even for table owners +alter table orders force row level security; + +-- Set user context and query +set app.current_user_id = '123'; +select * from orders; -- Only returns orders for user 123 +``` + +Policy for authenticated role: + +```sql +create policy orders_user_policy on orders + for all + to authenticated + using (user_id = auth.uid()); +``` + +Reference: [Row Level Security](https://supabase.com/docs/guides/database/postgres/row-level-security) diff --git a/devcon/.agents/skills/supabase-postgres-best-practices/references/security-rls-performance.md b/devcon/.agents/skills/supabase-postgres-best-practices/references/security-rls-performance.md new file mode 100644 index 000000000..b32d92f7e --- /dev/null +++ 
b/devcon/.agents/skills/supabase-postgres-best-practices/references/security-rls-performance.md @@ -0,0 +1,57 @@ +--- +title: Optimize RLS Policies for Performance +impact: HIGH +impactDescription: 5-10x faster RLS queries with proper patterns +tags: rls, performance, security, optimization +--- + +## Optimize RLS Policies for Performance + +Poorly written RLS policies can cause severe performance issues. Use subqueries and indexes strategically. + +**Incorrect (function called for every row):** + +```sql +create policy orders_policy on orders + using (auth.uid() = user_id); -- auth.uid() called per row! + +-- With 1M rows, auth.uid() is called 1M times +``` + +**Correct (wrap functions in SELECT):** + +```sql +create policy orders_policy on orders + using ((select auth.uid()) = user_id); -- Called once, cached + +-- 100x+ faster on large tables +``` + +Use security definer functions for complex checks: + +```sql +-- Create helper function (runs as definer, bypasses RLS) +create or replace function is_team_member(team_id bigint) +returns boolean +language sql +security definer +set search_path = '' +as $$ + select exists ( + select 1 from public.team_members + where team_id = $1 and user_id = (select auth.uid()) + ); +$$; + +-- Use in policy (indexed lookup, not per-row check) +create policy team_orders_policy on orders + using ((select is_team_member(team_id))); +``` + +Always add indexes on columns used in RLS policies: + +```sql +create index orders_user_id_idx on orders (user_id); +``` + +Reference: [RLS Performance](https://supabase.com/docs/guides/database/postgres/row-level-security#rls-performance-recommendations) diff --git a/devcon/.agents/skills/vercel-react-best-practices/AGENTS.md b/devcon/.agents/skills/vercel-react-best-practices/AGENTS.md new file mode 100644 index 000000000..94c3c8441 --- /dev/null +++ b/devcon/.agents/skills/vercel-react-best-practices/AGENTS.md @@ -0,0 +1,2975 @@ +# React Best Practices + +**Version 1.0.0** +Vercel Engineering 
+January 2026 + +> **Note:** +> This document is mainly for agents and LLMs to follow when maintaining, +> generating, or refactoring React and Next.js codebases. Humans +> may also find it useful, but guidance here is optimized for automation +> and consistency by AI-assisted workflows. + +--- + +## Abstract + +Comprehensive performance optimization guide for React and Next.js applications, designed for AI agents and LLMs. Contains 40+ rules across 8 categories, prioritized by impact from critical (eliminating waterfalls, reducing bundle size) to incremental (advanced patterns). Each rule includes detailed explanations, real-world examples comparing incorrect vs. correct implementations, and specific impact metrics to guide automated refactoring and code generation. + +--- + +## Table of Contents + +1. [Eliminating Waterfalls](#1-eliminating-waterfalls) — **CRITICAL** + - 1.1 [Defer Await Until Needed](#11-defer-await-until-needed) + - 1.2 [Dependency-Based Parallelization](#12-dependency-based-parallelization) + - 1.3 [Prevent Waterfall Chains in API Routes](#13-prevent-waterfall-chains-in-api-routes) + - 1.4 [Promise.all() for Independent Operations](#14-promiseall-for-independent-operations) + - 1.5 [Strategic Suspense Boundaries](#15-strategic-suspense-boundaries) +2. [Bundle Size Optimization](#2-bundle-size-optimization) — **CRITICAL** + - 2.1 [Avoid Barrel File Imports](#21-avoid-barrel-file-imports) + - 2.2 [Conditional Module Loading](#22-conditional-module-loading) + - 2.3 [Defer Non-Critical Third-Party Libraries](#23-defer-non-critical-third-party-libraries) + - 2.4 [Dynamic Imports for Heavy Components](#24-dynamic-imports-for-heavy-components) + - 2.5 [Preload Based on User Intent](#25-preload-based-on-user-intent) +3. 
[Server-Side Performance](#3-server-side-performance) — **HIGH** + - 3.1 [Authenticate Server Actions Like API Routes](#31-authenticate-server-actions-like-api-routes) + - 3.2 [Avoid Duplicate Serialization in RSC Props](#32-avoid-duplicate-serialization-in-rsc-props) + - 3.3 [Cross-Request LRU Caching](#33-cross-request-lru-caching) + - 3.4 [Hoist Static I/O to Module Level](#34-hoist-static-io-to-module-level) + - 3.5 [Minimize Serialization at RSC Boundaries](#35-minimize-serialization-at-rsc-boundaries) + - 3.6 [Parallel Data Fetching with Component Composition](#36-parallel-data-fetching-with-component-composition) + - 3.7 [Per-Request Deduplication with React.cache()](#37-per-request-deduplication-with-reactcache) + - 3.8 [Use after() for Non-Blocking Operations](#38-use-after-for-non-blocking-operations) +4. [Client-Side Data Fetching](#4-client-side-data-fetching) — **MEDIUM-HIGH** + - 4.1 [Deduplicate Global Event Listeners](#41-deduplicate-global-event-listeners) + - 4.2 [Use Passive Event Listeners for Scrolling Performance](#42-use-passive-event-listeners-for-scrolling-performance) + - 4.3 [Use SWR for Automatic Deduplication](#43-use-swr-for-automatic-deduplication) + - 4.4 [Version and Minimize localStorage Data](#44-version-and-minimize-localstorage-data) +5. 
[Re-render Optimization](#5-re-render-optimization) — **MEDIUM** + - 5.1 [Calculate Derived State During Rendering](#51-calculate-derived-state-during-rendering) + - 5.2 [Defer State Reads to Usage Point](#52-defer-state-reads-to-usage-point) + - 5.3 [Do not wrap a simple expression with a primitive result type in useMemo](#53-do-not-wrap-a-simple-expression-with-a-primitive-result-type-in-usememo) + - 5.4 [Extract Default Non-primitive Parameter Value from Memoized Component to Constant](#54-extract-default-non-primitive-parameter-value-from-memoized-component-to-constant) + - 5.5 [Extract to Memoized Components](#55-extract-to-memoized-components) + - 5.6 [Narrow Effect Dependencies](#56-narrow-effect-dependencies) + - 5.7 [Put Interaction Logic in Event Handlers](#57-put-interaction-logic-in-event-handlers) + - 5.8 [Subscribe to Derived State](#58-subscribe-to-derived-state) + - 5.9 [Use Functional setState Updates](#59-use-functional-setstate-updates) + - 5.10 [Use Lazy State Initialization](#510-use-lazy-state-initialization) + - 5.11 [Use Transitions for Non-Urgent Updates](#511-use-transitions-for-non-urgent-updates) + - 5.12 [Use useRef for Transient Values](#512-use-useref-for-transient-values) +6. 
[Rendering Performance](#6-rendering-performance) — **MEDIUM** + - 6.1 [Animate SVG Wrapper Instead of SVG Element](#61-animate-svg-wrapper-instead-of-svg-element) + - 6.2 [CSS content-visibility for Long Lists](#62-css-content-visibility-for-long-lists) + - 6.3 [Hoist Static JSX Elements](#63-hoist-static-jsx-elements) + - 6.4 [Optimize SVG Precision](#64-optimize-svg-precision) + - 6.5 [Prevent Hydration Mismatch Without Flickering](#65-prevent-hydration-mismatch-without-flickering) + - 6.6 [Suppress Expected Hydration Mismatches](#66-suppress-expected-hydration-mismatches) + - 6.7 [Use Activity Component for Show/Hide](#67-use-activity-component-for-showhide) + - 6.8 [Use Explicit Conditional Rendering](#68-use-explicit-conditional-rendering) + - 6.9 [Use useTransition Over Manual Loading States](#69-use-usetransition-over-manual-loading-states) +7. [JavaScript Performance](#7-javascript-performance) — **LOW-MEDIUM** + - 7.1 [Avoid Layout Thrashing](#71-avoid-layout-thrashing) + - 7.2 [Build Index Maps for Repeated Lookups](#72-build-index-maps-for-repeated-lookups) + - 7.3 [Cache Property Access in Loops](#73-cache-property-access-in-loops) + - 7.4 [Cache Repeated Function Calls](#74-cache-repeated-function-calls) + - 7.5 [Cache Storage API Calls](#75-cache-storage-api-calls) + - 7.6 [Combine Multiple Array Iterations](#76-combine-multiple-array-iterations) + - 7.7 [Early Length Check for Array Comparisons](#77-early-length-check-for-array-comparisons) + - 7.8 [Early Return from Functions](#78-early-return-from-functions) + - 7.9 [Hoist RegExp Creation](#79-hoist-regexp-creation) + - 7.10 [Use Loop for Min/Max Instead of Sort](#710-use-loop-for-minmax-instead-of-sort) + - 7.11 [Use Set/Map for O(1) Lookups](#711-use-setmap-for-o1-lookups) + - 7.12 [Use toSorted() Instead of sort() for Immutability](#712-use-tosorted-instead-of-sort-for-immutability) +8. 
[Advanced Patterns](#8-advanced-patterns) — **LOW** + - 8.1 [Initialize App Once, Not Per Mount](#81-initialize-app-once-not-per-mount) + - 8.2 [Store Event Handlers in Refs](#82-store-event-handlers-in-refs) + - 8.3 [useEffectEvent for Stable Callback Refs](#83-useeffectevent-for-stable-callback-refs) + +--- + +## 1. Eliminating Waterfalls + +**Impact: CRITICAL** + +Waterfalls are the #1 performance killer. Each sequential await adds full network latency. Eliminating them yields the largest gains. + +### 1.1 Defer Await Until Needed + +**Impact: HIGH (avoids blocking unused code paths)** + +Move `await` operations into the branches where they're actually used to avoid blocking code paths that don't need them. + +**Incorrect: blocks both branches** + +```typescript +async function handleRequest(userId: string, skipProcessing: boolean) { + const userData = await fetchUserData(userId) + + if (skipProcessing) { + // Returns immediately but still waited for userData + return { skipped: true } + } + + // Only this branch uses userData + return processUserData(userData) +} +``` + +**Correct: only blocks when needed** + +```typescript +async function handleRequest(userId: string, skipProcessing: boolean) { + if (skipProcessing) { + // Returns immediately without waiting + return { skipped: true } + } + + // Fetch only when needed + const userData = await fetchUserData(userId) + return processUserData(userData) +} +``` + +**Another example: early return optimization** + +```typescript +// Incorrect: always fetches permissions +async function updateResource(resourceId: string, userId: string) { + const permissions = await fetchPermissions(userId) + const resource = await getResource(resourceId) + + if (!resource) { + return { error: 'Not found' } + } + + if (!permissions.canEdit) { + return { error: 'Forbidden' } + } + + return await updateResourceData(resource, permissions) +} + +// Correct: fetches only when needed +async function updateResource(resourceId: string, 
userId: string) { + const resource = await getResource(resourceId) + + if (!resource) { + return { error: 'Not found' } + } + + const permissions = await fetchPermissions(userId) + + if (!permissions.canEdit) { + return { error: 'Forbidden' } + } + + return await updateResourceData(resource, permissions) +} +``` + +This optimization is especially valuable when the skipped branch is frequently taken, or when the deferred operation is expensive. + +### 1.2 Dependency-Based Parallelization + +**Impact: CRITICAL (2-10× improvement)** + +For operations with partial dependencies, use `better-all` to maximize parallelism. It automatically starts each task at the earliest possible moment. + +**Incorrect: profile waits for config unnecessarily** + +```typescript +const [user, config] = await Promise.all([ + fetchUser(), + fetchConfig() +]) +const profile = await fetchProfile(user.id) +``` + +**Correct: config and profile run in parallel** + +```typescript +import { all } from 'better-all' + +const { user, config, profile } = await all({ + async user() { return fetchUser() }, + async config() { return fetchConfig() }, + async profile() { + return fetchProfile((await this.$.user).id) + } +}) +``` + +**Alternative without extra dependencies:** + +```typescript +const userPromise = fetchUser() +const profilePromise = userPromise.then(user => fetchProfile(user.id)) + +const [user, config, profile] = await Promise.all([ + userPromise, + fetchConfig(), + profilePromise +]) +``` + +We can also create all the promises first, and do `Promise.all()` at the end. + +Reference: [https://github.com/shuding/better-all](https://github.com/shuding/better-all) + +### 1.3 Prevent Waterfall Chains in API Routes + +**Impact: CRITICAL (2-10× improvement)** + +In API routes and Server Actions, start independent operations immediately, even if you don't await them yet. 
+ +**Incorrect: config waits for auth, data waits for both** + +```typescript +export async function GET(request: Request) { + const session = await auth() + const config = await fetchConfig() + const data = await fetchData(session.user.id) + return Response.json({ data, config }) +} +``` + +**Correct: auth and config start immediately** + +```typescript +export async function GET(request: Request) { + const sessionPromise = auth() + const configPromise = fetchConfig() + const session = await sessionPromise + const [config, data] = await Promise.all([ + configPromise, + fetchData(session.user.id) + ]) + return Response.json({ data, config }) +} +``` + +For operations with more complex dependency chains, use `better-all` to automatically maximize parallelism (see Dependency-Based Parallelization). + +### 1.4 Promise.all() for Independent Operations + +**Impact: CRITICAL (2-10× improvement)** + +When async operations have no interdependencies, execute them concurrently using `Promise.all()`. + +**Incorrect: sequential execution, 3 round trips** + +```typescript +const user = await fetchUser() +const posts = await fetchPosts() +const comments = await fetchComments() +``` + +**Correct: parallel execution, 1 round trip** + +```typescript +const [user, posts, comments] = await Promise.all([ + fetchUser(), + fetchPosts(), + fetchComments() +]) +``` + +### 1.5 Strategic Suspense Boundaries + +**Impact: HIGH (faster initial paint)** + +Instead of awaiting data in async components before returning JSX, use Suspense boundaries to show the wrapper UI faster while data loads. + +**Incorrect: wrapper blocked by data fetching** + +```tsx +async function Page() { + const data = await fetchData() // Blocks entire page + + return ( +
+    <div>
+      <div>Sidebar</div>
+      <div>Header</div>
+      <div>
+        <DataDisplay data={data} />
+      </div>
+      <div>Footer</div>
+    </div>
+ ) +} +``` + +The entire layout waits for data even though only the middle section needs it. + +**Correct: wrapper shows immediately, data streams in** + +```tsx +function Page() { + return ( +
+    <div>
+      <div>Sidebar</div>
+      <div>Header</div>
+      <div>
+        <Suspense fallback={<Spinner />}>
+          <DataDisplay />
+        </Suspense>
+      </div>
+      <div>Footer</div>
+    </div>
+  )
+}
+
+async function DataDisplay() {
+  const data = await fetchData() // Only blocks this component
+  return <div>{data.content}</div>
+} +``` + +Sidebar, Header, and Footer render immediately. Only DataDisplay waits for data. + +**Alternative: share promise across components** + +```tsx +function Page() { + // Start fetch immediately, but don't await + const dataPromise = fetchData() + + return ( +
+    <div>
+      <div>Sidebar</div>
+      <div>Header</div>
+      <Suspense fallback={<Spinner />}>
+        <DataDisplay dataPromise={dataPromise} />
+        <DataSummary dataPromise={dataPromise} />
+      </Suspense>
+      <div>Footer</div>
+    </div>
+  )
+}
+
+function DataDisplay({ dataPromise }: { dataPromise: Promise<Data> }) {
+  const data = use(dataPromise) // Unwraps the promise
+  return <div>{data.content}</div>
+}
+
+function DataSummary({ dataPromise }: { dataPromise: Promise<Data> }) {
+  const data = use(dataPromise) // Reuses the same promise
+  return <div>{data.summary}</div>
+} +``` + +Both components share the same promise, so only one fetch occurs. Layout renders immediately while both components wait together. + +**When NOT to use this pattern:** + +- Critical data needed for layout decisions (affects positioning) + +- SEO-critical content above the fold + +- Small, fast queries where suspense overhead isn't worth it + +- When you want to avoid layout shift (loading → content jump) + +**Trade-off:** Faster initial paint vs potential layout shift. Choose based on your UX priorities. + +--- + +## 2. Bundle Size Optimization + +**Impact: CRITICAL** + +Reducing initial bundle size improves Time to Interactive and Largest Contentful Paint. + +### 2.1 Avoid Barrel File Imports + +**Impact: CRITICAL (200-800ms import cost, slow builds)** + +Import directly from source files instead of barrel files to avoid loading thousands of unused modules. **Barrel files** are entry points that re-export multiple modules (e.g., `index.js` that does `export * from './module'`). + +Popular icon and component libraries can have **up to 10,000 re-exports** in their entry file. For many React packages, **it takes 200-800ms just to import them**, affecting both development speed and production cold starts. + +**Why tree-shaking doesn't help:** When a library is marked as external (not bundled), the bundler can't optimize it. If you bundle it to enable tree-shaking, builds become substantially slower analyzing the entire module graph. 
+ +**Incorrect: imports entire library** + +```tsx +import { Check, X, Menu } from 'lucide-react' +// Loads 1,583 modules, takes ~2.8s extra in dev +// Runtime cost: 200-800ms on every cold start + +import { Button, TextField } from '@mui/material' +// Loads 2,225 modules, takes ~4.2s extra in dev +``` + +**Correct: imports only what you need** + +```tsx +import Check from 'lucide-react/dist/esm/icons/check' +import X from 'lucide-react/dist/esm/icons/x' +import Menu from 'lucide-react/dist/esm/icons/menu' +// Loads only 3 modules (~2KB vs ~1MB) + +import Button from '@mui/material/Button' +import TextField from '@mui/material/TextField' +// Loads only what you use +``` + +**Alternative: Next.js 13.5+** + +```js +// next.config.js - use optimizePackageImports +module.exports = { + experimental: { + optimizePackageImports: ['lucide-react', '@mui/material'] + } +} + +// Then you can keep the ergonomic barrel imports: +import { Check, X, Menu } from 'lucide-react' +// Automatically transformed to direct imports at build time +``` + +Direct imports provide 15-70% faster dev boot, 28% faster builds, 40% faster cold starts, and significantly faster HMR. + +Libraries commonly affected: `lucide-react`, `@mui/material`, `@mui/icons-material`, `@tabler/icons-react`, `react-icons`, `@headlessui/react`, `@radix-ui/react-*`, `lodash`, `ramda`, `date-fns`, `rxjs`, `react-use`. + +Reference: [https://vercel.com/blog/how-we-optimized-package-imports-in-next-js](https://vercel.com/blog/how-we-optimized-package-imports-in-next-js) + +### 2.2 Conditional Module Loading + +**Impact: HIGH (loads large data only when needed)** + +Load large data or modules only when a feature is activated. 
+ +**Example: lazy-load animation frames** + +```tsx +function AnimationPlayer({ enabled, setEnabled }: { enabled: boolean; setEnabled: React.Dispatch> }) { + const [frames, setFrames] = useState(null) + + useEffect(() => { + if (enabled && !frames && typeof window !== 'undefined') { + import('./animation-frames.js') + .then(mod => setFrames(mod.frames)) + .catch(() => setEnabled(false)) + } + }, [enabled, frames, setEnabled]) + + if (!frames) return + return +} +``` + +The `typeof window !== 'undefined'` check prevents bundling this module for SSR, optimizing server bundle size and build speed. + +### 2.3 Defer Non-Critical Third-Party Libraries + +**Impact: MEDIUM (loads after hydration)** + +Analytics, logging, and error tracking don't block user interaction. Load them after hydration. + +**Incorrect: blocks initial bundle** + +```tsx +import { Analytics } from '@vercel/analytics/react' + +export default function RootLayout({ children }) { + return ( + + + {children} + + + + ) +} +``` + +**Correct: loads after hydration** + +```tsx +import dynamic from 'next/dynamic' + +const Analytics = dynamic( + () => import('@vercel/analytics/react').then(m => m.Analytics), + { ssr: false } +) + +export default function RootLayout({ children }) { + return ( + + + {children} + + + + ) +} +``` + +### 2.4 Dynamic Imports for Heavy Components + +**Impact: CRITICAL (directly affects TTI and LCP)** + +Use `next/dynamic` to lazy-load large components not needed on initial render. 
+ +**Incorrect: Monaco bundles with main chunk ~300KB** + +```tsx +import { MonacoEditor } from './monaco-editor' + +function CodePanel({ code }: { code: string }) { + return +} +``` + +**Correct: Monaco loads on demand** + +```tsx +import dynamic from 'next/dynamic' + +const MonacoEditor = dynamic( + () => import('./monaco-editor').then(m => m.MonacoEditor), + { ssr: false } +) + +function CodePanel({ code }: { code: string }) { + return +} +``` + +### 2.5 Preload Based on User Intent + +**Impact: MEDIUM (reduces perceived latency)** + +Preload heavy bundles before they're needed to reduce perceived latency. + +**Example: preload on hover/focus** + +```tsx +function EditorButton({ onClick }: { onClick: () => void }) { + const preload = () => { + if (typeof window !== 'undefined') { + void import('./monaco-editor') + } + } + + return ( + + ) +} +``` + +**Example: preload when feature flag is enabled** + +```tsx +function FlagsProvider({ children, flags }: Props) { + useEffect(() => { + if (flags.editorEnabled && typeof window !== 'undefined') { + void import('./monaco-editor').then(mod => mod.init()) + } + }, [flags.editorEnabled]) + + return + {children} + +} +``` + +The `typeof window !== 'undefined'` check prevents bundling preloaded modules for SSR, optimizing server bundle size and build speed. + +--- + +## 3. Server-Side Performance + +**Impact: HIGH** + +Optimizing server-side rendering and data fetching eliminates server-side waterfalls and reduces response times. + +### 3.1 Authenticate Server Actions Like API Routes + +**Impact: CRITICAL (prevents unauthorized access to server mutations)** + +Server Actions (functions with `"use server"`) are exposed as public endpoints, just like API routes. Always verify authentication and authorization **inside** each Server Action—do not rely solely on middleware, layout guards, or page-level checks, as Server Actions can be invoked directly. 
+ +Next.js documentation explicitly states: "Treat Server Actions with the same security considerations as public-facing API endpoints, and verify if the user is allowed to perform a mutation." + +**Incorrect: no authentication check** + +```typescript +'use server' + +export async function deleteUser(userId: string) { + // Anyone can call this! No auth check + await db.user.delete({ where: { id: userId } }) + return { success: true } +} +``` + +**Correct: authentication inside the action** + +```typescript +'use server' + +import { verifySession } from '@/lib/auth' +import { unauthorized } from '@/lib/errors' + +export async function deleteUser(userId: string) { + // Always check auth inside the action + const session = await verifySession() + + if (!session) { + throw unauthorized('Must be logged in') + } + + // Check authorization too + if (session.user.role !== 'admin' && session.user.id !== userId) { + throw unauthorized('Cannot delete other users') + } + + await db.user.delete({ where: { id: userId } }) + return { success: true } +} +``` + +**With input validation:** + +```typescript +'use server' + +import { verifySession } from '@/lib/auth' +import { z } from 'zod' + +const updateProfileSchema = z.object({ + userId: z.string().uuid(), + name: z.string().min(1).max(100), + email: z.string().email() +}) + +export async function updateProfile(data: unknown) { + // Validate input first + const validated = updateProfileSchema.parse(data) + + // Then authenticate + const session = await verifySession() + if (!session) { + throw new Error('Unauthorized') + } + + // Then authorize + if (session.user.id !== validated.userId) { + throw new Error('Can only update own profile') + } + + // Finally perform the mutation + await db.user.update({ + where: { id: validated.userId }, + data: { + name: validated.name, + email: validated.email + } + }) + + return { success: true } +} +``` + +Reference: 
[https://nextjs.org/docs/app/guides/authentication](https://nextjs.org/docs/app/guides/authentication) + +### 3.2 Avoid Duplicate Serialization in RSC Props + +**Impact: LOW (reduces network payload by avoiding duplicate serialization)** + +RSC→client serialization deduplicates by object reference, not value. Same reference = serialized once; new reference = serialized again. Do transformations (`.toSorted()`, `.filter()`, `.map()`) in client, not server. + +**Incorrect: duplicates array** + +```tsx +// RSC: sends 6 strings (2 arrays × 3 items) + +``` + +**Correct: sends 3 strings** + +```tsx +// RSC: send once + + +// Client: transform there +'use client' +const sorted = useMemo(() => [...usernames].sort(), [usernames]) +``` + +**Nested deduplication behavior:** + +```tsx +// string[] - duplicates everything +usernames={['a','b']} sorted={usernames.toSorted()} // sends 4 strings + +// object[] - duplicates array structure only +users={[{id:1},{id:2}]} sorted={users.toSorted()} // sends 2 arrays + 2 unique objects (not 4) +``` + +Deduplication works recursively. Impact varies by data type: + +- `string[]`, `number[]`, `boolean[]`: **HIGH impact** - array + all primitives fully duplicated + +- `object[]`: **LOW impact** - array duplicated, but nested objects deduplicated by reference + +**Operations breaking deduplication: create new references** + +- Arrays: `.toSorted()`, `.filter()`, `.map()`, `.slice()`, `[...arr]` + +- Objects: `{...obj}`, `Object.assign()`, `structuredClone()`, `JSON.parse(JSON.stringify())` + +**More examples:** + +```tsx +// ❌ Bad + u.active)} /> + + +// ✅ Good + + +// Do filtering/destructuring in client +``` + +**Exception:** Pass derived data when transformation is expensive or client doesn't need original. + +### 3.3 Cross-Request LRU Caching + +**Impact: HIGH (caches across requests)** + +`React.cache()` only works within one request. For data shared across sequential requests (user clicks button A then button B), use an LRU cache. 
+ +**Implementation:** + +```typescript +import { LRUCache } from 'lru-cache' + +const cache = new LRUCache({ + max: 1000, + ttl: 5 * 60 * 1000 // 5 minutes +}) + +export async function getUser(id: string) { + const cached = cache.get(id) + if (cached) return cached + + const user = await db.user.findUnique({ where: { id } }) + cache.set(id, user) + return user +} + +// Request 1: DB query, result cached +// Request 2: cache hit, no DB query +``` + +Use when sequential user actions hit multiple endpoints needing the same data within seconds. + +**With Vercel's [Fluid Compute](https://vercel.com/docs/fluid-compute):** LRU caching is especially effective because multiple concurrent requests can share the same function instance and cache. This means the cache persists across requests without needing external storage like Redis. + +**In traditional serverless:** Each invocation runs in isolation, so consider Redis for cross-process caching. + +Reference: [https://github.com/isaacs/node-lru-cache](https://github.com/isaacs/node-lru-cache) + +### 3.4 Hoist Static I/O to Module Level + +**Impact: HIGH (avoids repeated file/network I/O per request)** + +When loading static assets (fonts, logos, images, config files) in route handlers or server functions, hoist the I/O operation to module level. Module-level code runs once when the module is first imported, not on every request. This eliminates redundant file system reads or network fetches that would otherwise run on every invocation. 
+ +**Incorrect: reads font file on every request** + +**Correct: loads once at module initialization** + +**Alternative: synchronous file reads with Node.js fs** + +**General Node.js example: loading config or templates** + +**When to use this pattern:** + +- Loading fonts for OG image generation + +- Loading static logos, icons, or watermarks + +- Reading configuration files that don't change at runtime + +- Loading email templates or other static templates + +- Any static asset that's the same across all requests + +**When NOT to use this pattern:** + +- Assets that vary per request or user + +- Files that may change during runtime (use caching with TTL instead) + +- Large files that would consume too much memory if kept loaded + +- Sensitive data that shouldn't persist in memory + +**With Vercel's [Fluid Compute](https://vercel.com/docs/fluid-compute):** Module-level caching is especially effective because multiple concurrent requests share the same function instance. The static assets stay loaded in memory across requests without cold start penalties. + +**In traditional serverless:** Each cold start re-executes module-level code, but subsequent warm invocations reuse the loaded assets until the instance is recycled. + +### 3.5 Minimize Serialization at RSC Boundaries + +**Impact: HIGH (reduces data transfer size)** + +The React Server/Client boundary serializes all object properties into strings and embeds them in the HTML response and subsequent RSC requests. This serialized data directly impacts page weight and load time, so **size matters a lot**. Only pass fields that the client actually uses. + +**Incorrect: serializes all 50 fields** + +```tsx +async function Page() { + const user = await fetchUser() // 50 fields + return +} + +'use client' +function Profile({ user }: { user: User }) { + return
{user.name}
// uses 1 field +} +``` + +**Correct: serializes only 1 field** + +```tsx +async function Page() { + const user = await fetchUser() + return +} + +'use client' +function Profile({ name }: { name: string }) { + return
{name}
+} +``` + +### 3.6 Parallel Data Fetching with Component Composition + +**Impact: CRITICAL (eliminates server-side waterfalls)** + +React Server Components execute sequentially within a tree. Restructure with composition to parallelize data fetching. + +**Incorrect: Sidebar waits for Page's fetch to complete** + +```tsx +export default async function Page() { + const header = await fetchHeader() + return ( +
+
{header}
+ +
+ ) +} + +async function Sidebar() { + const items = await fetchSidebarItems() + return +} +``` + +**Correct: both fetch simultaneously** + +```tsx +async function Header() { + const data = await fetchHeader() + return
{data}
+} + +async function Sidebar() { + const items = await fetchSidebarItems() + return +} + +export default function Page() { + return ( +
+
+ +
+ ) +} +``` + +**Alternative with children prop:** + +```tsx +async function Header() { + const data = await fetchHeader() + return
{data}
+} + +async function Sidebar() { + const items = await fetchSidebarItems() + return +} + +function Layout({ children }: { children: ReactNode }) { + return ( +
+
+ {children} +
+ ) +} + +export default function Page() { + return ( + + + + ) +} +``` + +### 3.7 Per-Request Deduplication with React.cache() + +**Impact: MEDIUM (deduplicates within request)** + +Use `React.cache()` for server-side request deduplication. Authentication and database queries benefit most. + +**Usage:** + +```typescript +import { cache } from 'react' + +export const getCurrentUser = cache(async () => { + const session = await auth() + if (!session?.user?.id) return null + return await db.user.findUnique({ + where: { id: session.user.id } + }) +}) +``` + +Within a single request, multiple calls to `getCurrentUser()` execute the query only once. + +**Avoid inline objects as arguments:** + +`React.cache()` uses shallow equality (`Object.is`) to determine cache hits. Inline objects create new references each call, preventing cache hits. + +**Incorrect: always cache miss** + +```typescript +const getUser = cache(async (params: { uid: number }) => { + return await db.user.findUnique({ where: { id: params.uid } }) +}) + +// Each call creates new object, never hits cache +getUser({ uid: 1 }) +getUser({ uid: 1 }) // Cache miss, runs query again +``` + +**Correct: cache hit** + +```typescript +const params = { uid: 1 } +getUser(params) // Query runs +getUser(params) // Cache hit (same reference) +``` + +If you must pass objects, pass the same reference: + +**Next.js-Specific Note:** + +In Next.js, the `fetch` API is automatically extended with request memoization. Requests with the same URL and options are automatically deduplicated within a single request, so you don't need `React.cache()` for `fetch` calls. However, `React.cache()` is still essential for other async tasks: + +- Database queries (Prisma, Drizzle, etc.) + +- Heavy computations + +- Authentication checks + +- File system operations + +- Any non-fetch async work + +Use `React.cache()` to deduplicate these operations across your component tree. 
+ +Reference: [https://react.dev/reference/react/cache](https://react.dev/reference/react/cache) + +### 3.8 Use after() for Non-Blocking Operations + +**Impact: MEDIUM (faster response times)** + +Use Next.js's `after()` to schedule work that should execute after a response is sent. This prevents logging, analytics, and other side effects from blocking the response. + +**Incorrect: blocks response** + +```tsx +import { logUserAction } from '@/app/utils' + +export async function POST(request: Request) { + // Perform mutation + await updateDatabase(request) + + // Logging blocks the response + const userAgent = request.headers.get('user-agent') || 'unknown' + await logUserAction({ userAgent }) + + return new Response(JSON.stringify({ status: 'success' }), { + status: 200, + headers: { 'Content-Type': 'application/json' } + }) +} +``` + +**Correct: non-blocking** + +```tsx +import { after } from 'next/server' +import { headers, cookies } from 'next/headers' +import { logUserAction } from '@/app/utils' + +export async function POST(request: Request) { + // Perform mutation + await updateDatabase(request) + + // Log after response is sent + after(async () => { + const userAgent = (await headers()).get('user-agent') || 'unknown' + const sessionCookie = (await cookies()).get('session-id')?.value || 'anonymous' + + logUserAction({ sessionCookie, userAgent }) + }) + + return new Response(JSON.stringify({ status: 'success' }), { + status: 200, + headers: { 'Content-Type': 'application/json' } + }) +} +``` + +The response is sent immediately while logging happens in the background. 
+ +**Common use cases:** + +- Analytics tracking + +- Audit logging + +- Sending notifications + +- Cache invalidation + +- Cleanup tasks + +**Important notes:** + +- `after()` runs even if the response fails or redirects + +- Works in Server Actions, Route Handlers, and Server Components + +Reference: [https://nextjs.org/docs/app/api-reference/functions/after](https://nextjs.org/docs/app/api-reference/functions/after) + +--- + +## 4. Client-Side Data Fetching + +**Impact: MEDIUM-HIGH** + +Automatic deduplication and efficient data fetching patterns reduce redundant network requests. + +### 4.1 Deduplicate Global Event Listeners + +**Impact: LOW (single listener for N components)** + +Use `useSWRSubscription()` to share global event listeners across component instances. + +**Incorrect: N instances = N listeners** + +```tsx +function useKeyboardShortcut(key: string, callback: () => void) { + useEffect(() => { + const handler = (e: KeyboardEvent) => { + if (e.metaKey && e.key === key) { + callback() + } + } + window.addEventListener('keydown', handler) + return () => window.removeEventListener('keydown', handler) + }, [key, callback]) +} +``` + +When using the `useKeyboardShortcut` hook multiple times, each instance will register a new listener. 
+
+**Correct: N instances = 1 listener**
+
+```tsx
+import useSWRSubscription from 'swr/subscription'
+
+// Module-level Map to track callbacks per key
+const keyCallbacks = new Map<string, Set<() => void>>()
+
+function useKeyboardShortcut(key: string, callback: () => void) {
+  // Register this callback in the Map
+  useEffect(() => {
+    if (!keyCallbacks.has(key)) {
+      keyCallbacks.set(key, new Set())
+    }
+    keyCallbacks.get(key)!.add(callback)
+
+    return () => {
+      const set = keyCallbacks.get(key)
+      if (set) {
+        set.delete(callback)
+        if (set.size === 0) {
+          keyCallbacks.delete(key)
+        }
+      }
+    }
+  }, [key, callback])
+
+  useSWRSubscription('global-keydown', () => {
+    const handler = (e: KeyboardEvent) => {
+      if (e.metaKey && keyCallbacks.has(e.key)) {
+        keyCallbacks.get(e.key)!.forEach(cb => cb())
+      }
+    }
+    window.addEventListener('keydown', handler)
+    return () => window.removeEventListener('keydown', handler)
+  })
+}
+
+function Profile() {
+  // Multiple shortcuts will share the same listener
+  useKeyboardShortcut('p', () => { /* ... */ })
+  useKeyboardShortcut('k', () => { /* ... */ })
+  // ...
+}
+```
+
+### 4.2 Use Passive Event Listeners for Scrolling Performance
+
+**Impact: MEDIUM (eliminates scroll delay caused by event listeners)**
+
+Add `{ passive: true }` to touch and wheel event listeners to enable immediate scrolling. Browsers normally wait for listeners to finish to check if `preventDefault()` is called, causing scroll delay. 
+ +**Incorrect:** + +```typescript +useEffect(() => { + const handleTouch = (e: TouchEvent) => console.log(e.touches[0].clientX) + const handleWheel = (e: WheelEvent) => console.log(e.deltaY) + + document.addEventListener('touchstart', handleTouch) + document.addEventListener('wheel', handleWheel) + + return () => { + document.removeEventListener('touchstart', handleTouch) + document.removeEventListener('wheel', handleWheel) + } +}, []) +``` + +**Correct:** + +```typescript +useEffect(() => { + const handleTouch = (e: TouchEvent) => console.log(e.touches[0].clientX) + const handleWheel = (e: WheelEvent) => console.log(e.deltaY) + + document.addEventListener('touchstart', handleTouch, { passive: true }) + document.addEventListener('wheel', handleWheel, { passive: true }) + + return () => { + document.removeEventListener('touchstart', handleTouch) + document.removeEventListener('wheel', handleWheel) + } +}, []) +``` + +**Use passive when:** tracking/analytics, logging, any listener that doesn't call `preventDefault()`. + +**Don't use passive when:** implementing custom swipe gestures, custom zoom controls, or any listener that needs `preventDefault()`. + +### 4.3 Use SWR for Automatic Deduplication + +**Impact: MEDIUM-HIGH (automatic deduplication)** + +SWR enables request deduplication, caching, and revalidation across component instances. 
+
+**Incorrect: no deduplication, each instance fetches**
+
+```tsx
+function UserList() {
+  const [users, setUsers] = useState([])
+  useEffect(() => {
+    fetch('/api/users')
+      .then(r => r.json())
+      .then(setUsers)
+  }, [])
+}
+```
+
+**Correct: multiple instances share one request**
+
+```tsx
+import useSWR from 'swr'
+
+function UserList() {
+  const { data: users } = useSWR('/api/users', fetcher)
+}
+```
+
+**For immutable data:**
+
+```tsx
+import useSWRImmutable from 'swr/immutable'
+
+function StaticContent() {
+  const { data } = useSWRImmutable('/api/config', fetcher)
+}
+```
+
+**For mutations:**
+
+```tsx
+import useSWRMutation from 'swr/mutation'
+
+function UpdateButton() {
+  const { trigger } = useSWRMutation('/api/user', updateUser)
+  return <button onClick={() => trigger()}>Update</button>
+}
+```
+
+Reference: [https://swr.vercel.app](https://swr.vercel.app)
+
+### 4.4 Version and Minimize localStorage Data
+
+**Impact: MEDIUM (prevents schema conflicts, reduces storage size)**
+
+Add version prefix to keys and store only needed fields. Prevents schema conflicts and accidental storage of sensitive data.
+
+**Incorrect:**
+
+```typescript
+// No version, stores everything, no error handling
+localStorage.setItem('userConfig', JSON.stringify(fullUserObject))
+const data = localStorage.getItem('userConfig')
+```
+
+**Correct:**
+
+```typescript
+const VERSION = 'v2'
+
+function saveConfig(config: { theme: string; language: string }) {
+  try {
+    localStorage.setItem(`userConfig:${VERSION}`, JSON.stringify(config))
+  } catch {
+    // Throws in incognito/private browsing, quota exceeded, or disabled
+  }
+}
+
+function loadConfig() {
+  try {
+    const data = localStorage.getItem(`userConfig:${VERSION}`)
+    return data ? JSON.parse(data) : null
+  } catch {
+    return null
+  }
+}
+
+// Migration from v1 to v2
+function migrate() {
+  try {
+    const v1 = localStorage.getItem('userConfig:v1')
+    if (v1) {
+      const old = JSON.parse(v1)
+      saveConfig({ theme: old.darkMode ? 
'dark' : 'light', language: old.lang }) + localStorage.removeItem('userConfig:v1') + } + } catch {} +} +``` + +**Store minimal fields from server responses:** + +```typescript +// User object has 20+ fields, only store what UI needs +function cachePrefs(user: FullUser) { + try { + localStorage.setItem('prefs:v1', JSON.stringify({ + theme: user.preferences.theme, + notifications: user.preferences.notifications + })) + } catch {} +} +``` + +**Always wrap in try-catch:** `getItem()` and `setItem()` throw in incognito/private browsing (Safari, Firefox), when quota exceeded, or when disabled. + +**Benefits:** Schema evolution via versioning, reduced storage size, prevents storing tokens/PII/internal flags. + +--- + +## 5. Re-render Optimization + +**Impact: MEDIUM** + +Reducing unnecessary re-renders minimizes wasted computation and improves UI responsiveness. + +### 5.1 Calculate Derived State During Rendering + +**Impact: MEDIUM (avoids redundant renders and state drift)** + +If a value can be computed from current props/state, do not store it in state or update it in an effect. Derive it during render to avoid extra renders and state drift. Do not set state in effects solely in response to prop changes; prefer derived values or keyed resets instead. + +**Incorrect: redundant state and effect** + +```tsx +function Form() { + const [firstName, setFirstName] = useState('First') + const [lastName, setLastName] = useState('Last') + const [fullName, setFullName] = useState('') + + useEffect(() => { + setFullName(firstName + ' ' + lastName) + }, [firstName, lastName]) + + return

{fullName}

+} +``` + +**Correct: derive during render** + +```tsx +function Form() { + const [firstName, setFirstName] = useState('First') + const [lastName, setLastName] = useState('Last') + const fullName = firstName + ' ' + lastName + + return

{fullName}

+} +``` + +Reference: [https://react.dev/learn/you-might-not-need-an-effect](https://react.dev/learn/you-might-not-need-an-effect) + +### 5.2 Defer State Reads to Usage Point + +**Impact: MEDIUM (avoids unnecessary subscriptions)** + +Don't subscribe to dynamic state (searchParams, localStorage) if you only read it inside callbacks. + +**Incorrect: subscribes to all searchParams changes** + +```tsx +function ShareButton({ chatId }: { chatId: string }) { + const searchParams = useSearchParams() + + const handleShare = () => { + const ref = searchParams.get('ref') + shareChat(chatId, { ref }) + } + + return +} +``` + +**Correct: reads on demand, no subscription** + +```tsx +function ShareButton({ chatId }: { chatId: string }) { + const handleShare = () => { + const params = new URLSearchParams(window.location.search) + const ref = params.get('ref') + shareChat(chatId, { ref }) + } + + return +} +``` + +### 5.3 Do not wrap a simple expression with a primitive result type in useMemo + +**Impact: LOW-MEDIUM (wasted computation on every render)** + +When an expression is simple (few logical or arithmetical operators) and has a primitive result type (boolean, number, string), do not wrap it in `useMemo`. + +Calling `useMemo` and comparing hook dependencies may consume more resources than the expression itself. 
+ +**Incorrect:** + +```tsx +function Header({ user, notifications }: Props) { + const isLoading = useMemo(() => { + return user.isLoading || notifications.isLoading + }, [user.isLoading, notifications.isLoading]) + + if (isLoading) return + // return some markup +} +``` + +**Correct:** + +```tsx +function Header({ user, notifications }: Props) { + const isLoading = user.isLoading || notifications.isLoading + + if (isLoading) return + // return some markup +} +``` + +### 5.4 Extract Default Non-primitive Parameter Value from Memoized Component to Constant + +**Impact: MEDIUM (restores memoization by using a constant for default value)** + +When memoized component has a default value for some non-primitive optional parameter, such as an array, function, or object, calling the component without that parameter results in broken memoization. This is because new value instances are created on every rerender, and they do not pass strict equality comparison in `memo()`. + +To address this issue, extract the default value into a constant. + +**Incorrect: `onClick` has different values on every rerender** + +```tsx +const UserAvatar = memo(function UserAvatar({ onClick = () => {} }: { onClick?: () => void }) { + // ... +}) + +// Used without optional onClick + +``` + +**Correct: stable default value** + +```tsx +const NOOP = () => {}; + +const UserAvatar = memo(function UserAvatar({ onClick = NOOP }: { onClick?: () => void }) { + // ... +}) + +// Used without optional onClick + +``` + +### 5.5 Extract to Memoized Components + +**Impact: MEDIUM (enables early returns)** + +Extract expensive work into memoized components to enable early returns before computation. + +**Incorrect: computes avatar even when loading** + +```tsx +function Profile({ user, loading }: Props) { + const avatar = useMemo(() => { + const id = computeAvatarId(user) + return + }, [user]) + + if (loading) return + return
{avatar}
+} +``` + +**Correct: skips computation when loading** + +```tsx +const UserAvatar = memo(function UserAvatar({ user }: { user: User }) { + const id = useMemo(() => computeAvatarId(user), [user]) + return +}) + +function Profile({ user, loading }: Props) { + if (loading) return + return ( +
+ +
+ ) +} +``` + +**Note:** If your project has [React Compiler](https://react.dev/learn/react-compiler) enabled, manual memoization with `memo()` and `useMemo()` is not necessary. The compiler automatically optimizes re-renders. + +### 5.6 Narrow Effect Dependencies + +**Impact: LOW (minimizes effect re-runs)** + +Specify primitive dependencies instead of objects to minimize effect re-runs. + +**Incorrect: re-runs on any user field change** + +```tsx +useEffect(() => { + console.log(user.id) +}, [user]) +``` + +**Correct: re-runs only when id changes** + +```tsx +useEffect(() => { + console.log(user.id) +}, [user.id]) +``` + +**For derived state, compute outside effect:** + +```tsx +// Incorrect: runs on width=767, 766, 765... +useEffect(() => { + if (width < 768) { + enableMobileMode() + } +}, [width]) + +// Correct: runs only on boolean transition +const isMobile = width < 768 +useEffect(() => { + if (isMobile) { + enableMobileMode() + } +}, [isMobile]) +``` + +### 5.7 Put Interaction Logic in Event Handlers + +**Impact: MEDIUM (avoids effect re-runs and duplicate side effects)** + +If a side effect is triggered by a specific user action (submit, click, drag), run it in that event handler. Do not model the action as state + effect; it makes effects re-run on unrelated changes and can duplicate the action. 
+ +**Incorrect: event modeled as state + effect** + +```tsx +function Form() { + const [submitted, setSubmitted] = useState(false) + const theme = useContext(ThemeContext) + + useEffect(() => { + if (submitted) { + post('/api/register') + showToast('Registered', theme) + } + }, [submitted, theme]) + + return +} +``` + +**Correct: do it in the handler** + +```tsx +function Form() { + const theme = useContext(ThemeContext) + + function handleSubmit() { + post('/api/register') + showToast('Registered', theme) + } + + return +} +``` + +Reference: [https://react.dev/learn/removing-effect-dependencies#should-this-code-move-to-an-event-handler](https://react.dev/learn/removing-effect-dependencies#should-this-code-move-to-an-event-handler) + +### 5.8 Subscribe to Derived State + +**Impact: MEDIUM (reduces re-render frequency)** + +Subscribe to derived boolean state instead of continuous values to reduce re-render frequency. + +**Incorrect: re-renders on every pixel change** + +```tsx +function Sidebar() { + const width = useWindowWidth() // updates continuously + const isMobile = width < 768 + return