1+ import { tags } from ".." ;
12import { IJsonParseResult } from "./IJsonParseResult" ;
23import { ILlmSchema } from "./ILlmSchema" ;
34import { IValidation } from "./IValidation" ;
@@ -21,11 +22,11 @@ export interface ILlmFunction {
2122 * Function name for LLM invocation.
2223 *
2324 * The identifier used by the LLM to call this function. Must be unique within
24- * the application. OpenAI limits function names to 64 characters.
25+ * the application.
2526 *
26- * @maxLength 64
27+ * OpenAI limits function names to 64 characters.
2728 */
28- name : string ;
29+ name : string & tags . MaxLength < 64 > ;
2930
3031 /**
3132 * Schema for function parameters.
@@ -90,9 +91,8 @@ export interface ILlmFunction {
9091 * Type validation is NOT performed — use {@link validate} after parsing.
9192 *
9293 * If the SDK (e.g., LangChain, Vercel AI, MCP) already parses JSON internally
93- * and provides a pre-parsed object, use `LlmJson.coerce()` from
94- * `@typia/utils` instead to apply schema-based type coercion without
95- * re-parsing.
94+ * and provides a pre-parsed object, use {@link coerce} instead to apply
95+ * schema-based type coercion without re-parsing.
9696 *
9797 * @param str Raw JSON string from LLM output
9898 * @returns Parse result with data on success, or partial data with errors
@@ -104,8 +104,8 @@ export interface ILlmFunction {
104104 *
105105 * **Use this only when the SDK (e.g., LangChain, Vercel AI, MCP) already
106106 * parses JSON internally.** For raw JSON strings from LLM output, use
107- * {@link parse} instead — it handles both lenient parsing and type coercion
108- * in one step.
107+ * {@link parse} instead — it handles both lenient parsing and type coercion in
108+ * one step.
109109 *
110110 * LLMs often return values with incorrect types even after parsing:
111111 *
@@ -132,10 +132,10 @@ export interface ILlmFunction {
132132 * numbers or missing required properties. Use this validator to check
133133 * arguments before execution.
134134 *
135- * When validation fails, use `stringifyValidationFailure()` from
136- * `@typia/utils` to format the error for LLM feedback. The formatted output
137- * shows the invalid JSON with inline error comments, helping the LLM
138- * understand and correct its mistakes in the next turn.
135+ * When validation fails, use {@link LlmJson.stringify} from `@typia/utils` to
136+ * format the error for LLM feedback. The formatted output shows the invalid
137+ * JSON with inline error comments, helping the LLM understand and correct its
138+ * mistakes in the next turn.
139139 *
140140 * @param args The arguments generated by the LLM
141141 * @returns Validation result with success status and any errors
0 commit comments