toVercelTools() function
Converts TypeScript classes or OpenAPI documents into Vercel AI SDK tools.
export function toVercelTools(props: {
controllers: Array<ILlmController | IHttpLlmController>;
prefix?: boolean | undefined;
}): Record<string, Tool>;
Vercel AI SDK integration for typia.
toVercelTools() converts TypeScript classes or OpenAPI documents into Vercel AI SDK Record<string, Tool> at once.
Every class method becomes a tool, JSDoc comments become tool descriptions, and TypeScript types become JSON schemas — all at compile time. For OpenAPI documents, every API endpoint is converted to a Vercel tool with schemas from the specification.
Validation feedback is embedded automatically.
Setup
npm install @typia/vercel ai
npm install typia
npx typia setup
From TypeScript Class
Vercel AI Tools
import { openai } from "@ai-sdk/openai";
import { toVercelTools } from "@typia/vercel";
import { generateText, GenerateTextResult, Tool } from "ai";
import typia from "typia";
import { Calculator } from "./Calculator";
const tools: Record<string, Tool> = toVercelTools({
controllers: [
typia.llm.controller<Calculator>("calculator", new Calculator()),
],
});
const result: GenerateTextResult = await generateText({
model: openai("gpt-4o"),
prompt: "What is 10 + 5?",
tools,
});
Create controllers from TypeScript classes with typia.llm.controller<Class>(), and pass them to toVercelTools().
controllers: Array of controllers created via typia.llm.controller<Class>() or HttpLlm.controller()
prefix: When true (default), tool names are formatted as {controllerName}_{methodName}. Set to false to use bare method names
From OpenAPI Document
import { toVercelTools } from "@typia/vercel";
import { HttpLlm } from "@typia/utils";
import { Tool } from "ai";
const tools: Record<string, Tool> = toVercelTools({
controllers: [
HttpLlm.controller({
name: "shopping",
document: await fetch(
"https://shopping-be.wrtn.ai/editor/swagger.json",
).then((r) => r.json()),
connection: {
host: "https://shopping-be.wrtn.ai",
headers: { Authorization: "Bearer ********" },
},
}),
],
});
Create controllers from OpenAPI documents with HttpLlm.controller(), and pass them to toVercelTools().
name: Controller name used as prefix for tool names
document: Swagger/OpenAPI document (v2.0, v3.0, or v3.1)
connection: HTTP connection info including host and optional headers
Validation Feedback
Validation Test
import type { Tool } from "ai";
import { TestValidator } from "@nestia/e2e";
import { ILlmController, IValidation } from "@typia/interface";
import { stringifyValidationFailure } from "@typia/utils";
import { toVercelTools } from "@typia/vercel";
import typia from "typia";
import { Calculator } from "../structures/Calculator";
export const test_vercel_class_controller_validation =
async (): Promise<void> => {
// 1. Create class-based controller using typia.llm.controller
const controller: ILlmController<Calculator> =
typia.llm.controller<Calculator>("calculator", new Calculator());
// 2. Convert to Vercel tools
const tools: Record<string, Tool> = toVercelTools({
controllers: [controller],
});
// 3. Test validation failure: wrong type (string instead of number)
const addTool: Tool = tools["calculator_add"]!;
const result: unknown = await addTool.execute!(
{ x: "not a number", y: 5 },
{ toolCallId: "test-1", messages: [], abortSignal: undefined as any },
);
// 4. Verify the result contains validation error
const expected: IValidation = typia.validate<Calculator.IProps>({
x: "not a number",
y: 5,
});
if (expected.success === true)
throw new Error("Expected validation to fail, but it succeeded.");
const expectedMessage: string = stringifyValidationFailure(expected);
TestValidator.predicate("result should be an error object", () => {
const res = result as { error?: boolean; message?: string };
return res.error === true && typeof res.message === "string";
});
TestValidator.predicate("error message should contain validation info", () => {
const res = result as { error: boolean; message: string };
return res.message === expectedMessage;
});
};
import { ILlmApplication, ILlmFunction, IValidation } from "@samchon/openapi";
import { FunctionCall } from "pseudo";
export const correctFunctionCall = (props: {
functionCall: FunctionCall;
application: ILlmApplication<"chatgpt">;
retry: (reason: string, errors?: IValidation.IError[]) => Promise<unknown>;
}): Promise<unknown> => {
// FIND FUNCTION
const func: ILlmFunction<"chatgpt"> | undefined =
props.application.functions.find((f) => f.name === props.functionCall.name);
if (func === undefined) {
// never happened in my experience
return props.retry(
"Unable to find the matched function name. Try it again.",
);
}
// VALIDATE
const result: IValidation<unknown> = func.validate(
props.functionCall.arguments,
);
if (result.success === false) {
// 1st trial: 30% (gpt-4o-mini in shopping mall chatbot)
// 2nd trial with validation feedback: 99%
// 3rd trial with validation feedback again: has never failed
return props.retry(
"Type errors are detected. Correct it through validation errors",
result.errors,
);
}
return result.data;
};
When LLM sends { x: "not a number", y: 5 }, the result is returned as { error: true, message: "..." } with detailed validation errors from stringifyValidationFailure(). The LLM reads this and self-corrects on the next turn.
In my experience, OpenAI gpt-4o-mini makes type-level mistakes about 70% of the time on complex schemas (Shopping Mall service). With validation feedback, the success rate jumps from 30% to 99% on the second attempt. Third attempt has never failed.
The embedded typia.validate<T>() creates validation logic by analyzing TypeScript source codes and types at the compilation level — more accurate and detailed than any runtime validator.
| Components | typia | TypeBox | ajv | io-ts | zod | C.V. |
|---|---|---|---|---|---|---|
| Easy to use | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Object (simple) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
| Object (hierarchical) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
| Object (recursive) | ✔ | ❌ | ✔ | ✔ | ✔ | ✔ |
| Object (union, implicit) | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Object (union, explicit) | ✔ | ✔ | ✔ | ✔ | ✔ | ❌ |
| Object (additional tags) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
| Object (template literal types) | ✔ | ✔ | ✔ | ❌ | ❌ | ❌ |
| Object (dynamic properties) | ✔ | ✔ | ✔ | ❌ | ❌ | ❌ |
| Array (rest tuple) | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Array (hierarchical) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
| Array (recursive) | ✔ | ✔ | ✔ | ✔ | ✔ | ❌ |
| Array (recursive, union) | ✔ | ✔ | ❌ | ✔ | ✔ | ❌ |
| Array (R+U, implicit) | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Array (repeated) | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Array (repeated, union) | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
| Ultimate Union Type | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
C.V. means class-validator
This validation feedback strategy also covers restriction properties:
string: minLength, maxLength, pattern, format, contentMediaType
number: minimum, maximum, exclusiveMinimum, exclusiveMaximum, multipleOf
array: minItems, maxItems, uniqueItems, items
Structured Output
Use typia.llm.parameters<T>() with Vercel’s jsonSchema() to generate structured output with validation:
import { openai } from "@ai-sdk/openai";
import { dedent, stringifyValidationFailure } from "@typia/utils";
import { generateObject, jsonSchema } from "ai";
import typia, { tags } from "typia";
interface IMember {
email: string & tags.Format<"email">;
name: string;
age: number & tags.Minimum<0> & tags.Maximum<100>;
hobbies: string[];
joined_at: string & tags.Format<"date">;
}
const { object } = await generateObject({
model: openai("gpt-4o"),
schema: jsonSchema<IMember>(typia.llm.parameters<IMember>(), {
validate: (value) => {
const result = typia.validate<IMember>(value);
if (result.success) return { success: true, value: result.data };
return {
success: false,
error: new Error(stringifyValidationFailure(result)),
};
},
}),
prompt: dedent`
I am a new member of the community.
My name is John Doe, and I am 25 years old.
I like playing basketball and reading books,
and joined to this community at 2022-01-01.
`,
});
Terminal
{ email: 'john.doe@example.com', name: 'John Doe', age: 25, hobbies: [ 'playing basketball', 'reading books' ], joined_at: '2022-01-01' }
The IMember interface is the single source of truth. typia.llm.parameters<IMember>() generates the JSON schema, and typia.validate<IMember>() validates the output — all from the same type.
Error Handling
Error Handling Test
import type { Tool } from "ai";
import { TestValidator } from "@nestia/e2e";
import { ILlmController } from "@typia/interface";
import { toVercelTools } from "@typia/vercel";
import typia from "typia";
import { Calculator } from "../structures/Calculator";
export const test_vercel_class_controller_error_handling =
async (): Promise<void> => {
// 1. Create class-based controller using typia.llm.controller
const controller: ILlmController<Calculator> =
typia.llm.controller<Calculator>("calculator", new Calculator());
// 2. Convert to Vercel tools
const tools: Record<string, Tool> = toVercelTools({
controllers: [controller],
});
// 3. Test divide by zero (throws an error)
const divideTool: Tool = tools["calculator_divide"]!;
const result: unknown = await divideTool.execute!(
{ x: 10, y: 0 },
{ toolCallId: "test-1", messages: [], abortSignal: undefined as any },
);
// 4. Verify the result contains error
TestValidator.predicate("result should be an error object", () => {
const res = result as { error?: boolean; message?: string };
return res.error === true && typeof res.message === "string";
});
TestValidator.predicate("error message should contain division by zero", () => {
const res = result as { error: boolean; message: string };
return res.message.includes("Division by zero");
});
};
When a tool execution throws a runtime error (e.g., division by zero), @typia/vercel catches the exception and returns { error: true, message: "Error: Division by zero is not allowed" }. This is different from validation errors — validation errors indicate wrong argument types, while runtime errors indicate the function itself failed.