AI SDK
Vercel AI SDK example
The `result.toDataStreamResponse()` method returns a standard `Response` object, which Vovk passes through unchanged as the Next.js route handler's response. On the client side, you can use the `useChat` hook to consume the stream and render chat messages, as outlined in the AI SDK documentation.
```ts
import { createLLMTools, HttpException, HttpStatus, KnownAny, post, prefix, openapi, type VovkRequest } from 'vovk';
import { jsonSchema, streamText, tool, type CoreMessage } from 'ai';
import { openai } from '@ai-sdk/openai';
import { UserZodRPC } from 'vovk-client';

@prefix('ai-sdk')
export default class AiSdkController {
  @openapi({
    summary: 'Vercel AI SDK',
    description:
      'Uses [@ai-sdk/openai](https://www.npmjs.com/package/@ai-sdk/openai) and ai packages to chat with an AI model',
  })
  @post('chat')
  static async chat(req: VovkRequest<{ messages: CoreMessage[] }>) {
    const { messages } = await req.json();
    const LIMIT = 5;

    // Cap the number of user messages accepted per conversation
    if (messages.filter(({ role }) => role === 'user').length > LIMIT) {
      throw new HttpException(HttpStatus.BAD_REQUEST, `You can only send ${LIMIT} messages at a time`);
    }

    // The streaming Response is returned directly from the controller
    return streamText({
      model: openai('gpt-4.1-nano'),
      system: 'You are a helpful assistant.',
      messages,
    }).toDataStreamResponse();
  }

  @openapi({
    summary: 'Vercel AI SDK with Function Calling',
    description:
      'Uses [@ai-sdk/openai](https://www.npmjs.com/package/@ai-sdk/openai) and ai packages to call a function',
  })
  @post('function-calling')
  static async functionCalling(req: VovkRequest<{ messages: CoreMessage[] }>) {
    const { messages } = await req.json();
    const LIMIT = 5;
    // Turn the UserZodRPC client module into LLM-callable tool definitions
    const { tools: llmTools } = createLLMTools({
      modules: { UserZodRPC },
      onExecute: (d) => console.log('Success', d),
      onError: (e) => console.error('Error', e),
    });

    if (messages.filter(({ role }) => role === 'user').length > LIMIT) {
      throw new HttpException(HttpStatus.BAD_REQUEST, `You can only send ${LIMIT} messages at a time`);
    }

    // Bridge the Vovk tool definitions to the AI SDK's tool() format
    const tools = Object.fromEntries(
      llmTools.map(({ name, execute, description, parameters }) => [
        name,
        tool<KnownAny, KnownAny>({
          execute: async (args, { toolCallId }) => {
            return execute(args, { toolCallId });
          },
          description,
          parameters: jsonSchema(parameters as KnownAny),
        }),
      ])
    );

    return streamText({
      model: openai('gpt-4.1-nano'),
      toolCallStreaming: true,
      system:
        'You are a helpful assistant. Always provide a clear confirmation message after executing any function. Explain what was done and what the results were after the user request is executed.',
      messages,
      tools,
      onError: (e) => console.error('streamText error', e),
      onFinish: ({ finishReason }) => {
        if (finishReason === 'tool-calls') {
          console.log('Tool calls finished');
        }
      },
    }).toDataStreamResponse();
  }
}
```
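The `Response` pass-through described above works because the controller is registered in the segment's route file, whose exported HTTP handlers Vovk generates. Below is a minimal sketch of that wiring, assuming a default generated Vovk project; the `initSegment` helper, the `[[...vovk]]` file location, and the `AiSdkRPC` key reflect a typical setup and may differ in your version.

```ts
// src/app/api/[[...vovk]]/route.ts — assumed default location
import { initSegment } from 'vovk';
import AiSdkController from '../../../modules/ai-sdk/AiSdkController';

// Controllers are keyed by the RPC name the generated client will use
const controllers = { AiSdkRPC: AiSdkController };

export type Controllers = typeof controllers;

// Whatever Response a controller method returns (including the streaming one
// from toDataStreamResponse()) is sent back to the client unchanged
export const { GET, POST, PUT, DELETE } = initSegment({ controllers });
```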
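On the client, the page below wires the `useChat` hook to the function-calling endpoint. On each submit the hook POSTs `{ messages }` to the configured `api` URL, which matches the `VovkRequest<{ messages: CoreMessage[] }>` body type the controller expects, and it appends the streamed assistant tokens to `messages` as they arrive.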
```tsx
'use client';
import { useChat } from '@ai-sdk/react';

export default function Page() {
  const { messages, input, handleSubmit, handleInputChange, isLoading, error } = useChat({
    api: '/api/ai-sdk/function-calling',
  });

  return (
    <form onSubmit={handleSubmit}>
      {messages.map((message, index) => (
        <div key={index}>
          {message.role === 'assistant' ? '🤖' : '👤'} {(message.content as string) || '...'}
        </div>
      ))}
      {error && <div>❌ {error.message}</div>}
      <div className="input-group">
        <input type="text" placeholder="Send a message..." value={input} onChange={handleInputChange} />
        <button disabled={isLoading}>Send</button>
      </div>
    </form>
  );
}
```
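Because the endpoints are plain HTTP routes, they can also be exercised without React. Here is a minimal sketch that posts a single message to the chat endpoint and prints the raw data-stream chunks; the `http://localhost:3000` origin is an assumption about your local dev server, while the `/api/ai-sdk/chat` path follows from the `@prefix('ai-sdk')` decorator and the `api` URL used above.

```ts
// Hypothetical standalone check against a locally running app (Node 18+)
const res = await fetch('http://localhost:3000/api/ai-sdk/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ messages: [{ role: 'user', content: 'Hello!' }] }),
});

if (!res.ok || !res.body) throw new Error(`Request failed with status ${res.status}`);

// Read the streamed body chunk by chunk and echo it to stdout
const reader = res.body.getReader();
const decoder = new TextDecoder();
for (;;) {
  const { done, value } = await reader.read();
  if (done) break;
  process.stdout.write(decoder.decode(value, { stream: true }));
}
```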