AI SDK
Vercel AI SDK example
The result.toUIMessageStreamResponse method returns a Response object, which is forwarded directly to the Next.js route handler. On the client side, you can use the useChat hook to consume the stream and render chat messages seamlessly, as outlined in the AI SDK documentation.
Result
Code
1import { createLLMTools, HttpException, HttpStatus, KnownAny, post, prefix, operation, type VovkRequest } from 'vovk';
2import { jsonSchema, type ModelMessage, streamText, tool, convertToModelMessages, UIMessage } from 'ai';
3import { openai } from '@ai-sdk/openai';
4import { UserZodRPC } from 'vovk-client';
5
6@prefix('ai-sdk')
7export default class AiSdkController {
8 @operation({
9 summary: 'Vercel AI SDK',
10 description:
11 'Uses [@ai-sdk/openai](https://www.npmjs.com/package/@ai-sdk/openai) and ai packages to chat with an AI model',
12 })
13 @post('chat')
14 static async chat(req: VovkRequest<{ messages: UIMessage[] }>) {
15 const { messages } = await req.json();
16 const LIMIT = 5;
17
18 if (messages.filter(({ role }) => role === 'user').length > LIMIT) {
19 throw new HttpException(HttpStatus.BAD_REQUEST, `You can only send ${LIMIT} messages at a time`);
20 }
21
22 return streamText({
23 model: openai('gpt-5-nano'),
24 system: 'You are a helpful assistant.',
25 messages: convertToModelMessages(messages),
26 }).toUIMessageStreamResponse();
27 }
28
29 @operation({
30 summary: 'Vercel AI SDK with Function Calling',
31 description:
32 'Uses [@ai-sdk/openai](https://www.npmjs.com/package/@ai-sdk/openai) and ai packages to call a function',
33 })
34 @post('function-calling')
35 static async functionCalling(req: VovkRequest<{ messages: UIMessage[] }>) {
36 const { messages } = await req.json();
37 const LIMIT = 5;
38 const { tools: llmTools } = createLLMTools({
39 modules: { UserZodRPC },
40 onExecute: (d) => console.log('Success', d),
41 onError: (e) => console.error('Error', e),
42 });
43
44 if (messages.filter(({ role }) => role === 'user').length > LIMIT) {
45 throw new HttpException(HttpStatus.BAD_REQUEST, `You can only send ${LIMIT} messages at a time`);
46 }
47
48 const tools = Object.fromEntries(
49 llmTools.map(({ name, execute, description, parameters }) => [
50 name,
51 tool<KnownAny, KnownAny>({
52 execute: async (args, { toolCallId }) => {
53 return execute(args, { toolCallId });
54 },
55 description,
56 inputSchema: jsonSchema(parameters as KnownAny),
57 }),
58 ])
59 );
60
61 return streamText({
62 model: openai('gpt-5-nano'),
63 // toolCallStreaming: true,
64 system:
65 'You are a helpful assistant. Always provide a clear confirmation message after executing any function. Explain what was done and what the results were after the user request is executed.',
66 messages: convertToModelMessages(messages),
67 tools,
68 onError: (e) => console.error('streamText error', e),
69 onFinish: ({ finishReason }) => {
70 if (finishReason === 'tool-calls') {
71 console.log('Tool calls finished');
72 }
73 },
74 }).toUIMessageStreamResponse();
75 }
76}
1import { createLLMTools, HttpException, HttpStatus, KnownAny, post, prefix, operation, type VovkRequest } from 'vovk';
2import { jsonSchema, type ModelMessage, streamText, tool, convertToModelMessages, UIMessage } from 'ai';
3import { openai } from '@ai-sdk/openai';
4import { UserZodRPC } from 'vovk-client';
5
6@prefix('ai-sdk')
7export default class AiSdkController {
8 @operation({
9 summary: 'Vercel AI SDK',
10 description:
11 'Uses [@ai-sdk/openai](https://www.npmjs.com/package/@ai-sdk/openai) and ai packages to chat with an AI model',
12 })
13 @post('chat')
14 static async chat(req: VovkRequest<{ messages: UIMessage[] }>) {
15 const { messages } = await req.json();
16 const LIMIT = 5;
17
18 if (messages.filter(({ role }) => role === 'user').length > LIMIT) {
19 throw new HttpException(HttpStatus.BAD_REQUEST, `You can only send ${LIMIT} messages at a time`);
20 }
21
22 return streamText({
23 model: openai('gpt-5-nano'),
24 system: 'You are a helpful assistant.',
25 messages: convertToModelMessages(messages),
26 }).toUIMessageStreamResponse();
27 }
28
29 @operation({
30 summary: 'Vercel AI SDK with Function Calling',
31 description:
32 'Uses [@ai-sdk/openai](https://www.npmjs.com/package/@ai-sdk/openai) and ai packages to call a function',
33 })
34 @post('function-calling')
35 static async functionCalling(req: VovkRequest<{ messages: UIMessage[] }>) {
36 const { messages } = await req.json();
37 const LIMIT = 5;
38 const { tools: llmTools } = createLLMTools({
39 modules: { UserZodRPC },
40 onExecute: (d) => console.log('Success', d),
41 onError: (e) => console.error('Error', e),
42 });
43
44 if (messages.filter(({ role }) => role === 'user').length > LIMIT) {
45 throw new HttpException(HttpStatus.BAD_REQUEST, `You can only send ${LIMIT} messages at a time`);
46 }
47
48 const tools = Object.fromEntries(
49 llmTools.map(({ name, execute, description, parameters }) => [
50 name,
51 tool<KnownAny, KnownAny>({
52 execute: async (args, { toolCallId }) => {
53 return execute(args, { toolCallId });
54 },
55 description,
56 inputSchema: jsonSchema(parameters as KnownAny),
57 }),
58 ])
59 );
60
61 return streamText({
62 model: openai('gpt-5-nano'),
63 // toolCallStreaming: true,
64 system:
65 'You are a helpful assistant. Always provide a clear confirmation message after executing any function. Explain what was done and what the results were after the user request is executed.',
66 messages: convertToModelMessages(messages),
67 tools,
68 onError: (e) => console.error('streamText error', e),
69 onFinish: ({ finishReason }) => {
70 if (finishReason === 'tool-calls') {
71 console.log('Tool calls finished');
72 }
73 },
74 }).toUIMessageStreamResponse();
75 }
76}
1import { createLLMTools, HttpException, HttpStatus, KnownAny, post, prefix, operation, type VovkRequest } from 'vovk';
2import { jsonSchema, type ModelMessage, streamText, tool, convertToModelMessages, UIMessage } from 'ai';
3import { openai } from '@ai-sdk/openai';
4import { UserZodRPC } from 'vovk-client';
5
6@prefix('ai-sdk')
7export default class AiSdkController {
8 @operation({
9 summary: 'Vercel AI SDK',
10 description:
11 'Uses [@ai-sdk/openai](https://www.npmjs.com/package/@ai-sdk/openai) and ai packages to chat with an AI model',
12 })
13 @post('chat')
14 static async chat(req: VovkRequest<{ messages: UIMessage[] }>) {
15 const { messages } = await req.json();
16 const LIMIT = 5;
17
18 if (messages.filter(({ role }) => role === 'user').length > LIMIT) {
19 throw new HttpException(HttpStatus.BAD_REQUEST, `You can only send ${LIMIT} messages at a time`);
20 }
21
22 return streamText({
23 model: openai('gpt-5-nano'),
24 system: 'You are a helpful assistant.',
25 messages: convertToModelMessages(messages),
26 }).toUIMessageStreamResponse();
27 }
28
29 @operation({
30 summary: 'Vercel AI SDK with Function Calling',
31 description:
32 'Uses [@ai-sdk/openai](https://www.npmjs.com/package/@ai-sdk/openai) and ai packages to call a function',
33 })
34 @post('function-calling')
35 static async functionCalling(req: VovkRequest<{ messages: UIMessage[] }>) {
36 const { messages } = await req.json();
37 const LIMIT = 5;
38 const { tools: llmTools } = createLLMTools({
39 modules: { UserZodRPC },
40 onExecute: (d) => console.log('Success', d),
41 onError: (e) => console.error('Error', e),
42 });
43
44 if (messages.filter(({ role }) => role === 'user').length > LIMIT) {
45 throw new HttpException(HttpStatus.BAD_REQUEST, `You can only send ${LIMIT} messages at a time`);
46 }
47
48 const tools = Object.fromEntries(
49 llmTools.map(({ name, execute, description, parameters }) => [
50 name,
51 tool<KnownAny, KnownAny>({
52 execute: async (args, { toolCallId }) => {
53 return execute(args, { toolCallId });
54 },
55 description,
56 inputSchema: jsonSchema(parameters as KnownAny),
57 }),
58 ])
59 );
60
61 return streamText({
62 model: openai('gpt-5-nano'),
63 // toolCallStreaming: true,
64 system:
65 'You are a helpful assistant. Always provide a clear confirmation message after executing any function. Explain what was done and what the results were after the user request is executed.',
66 messages: convertToModelMessages(messages),
67 tools,
68 onError: (e) => console.error('streamText error', e),
69 onFinish: ({ finishReason }) => {
70 if (finishReason === 'tool-calls') {
71 console.log('Tool calls finished');
72 }
73 },
74 }).toUIMessageStreamResponse();
75 }
76}
1import { createLLMTools, HttpException, HttpStatus, KnownAny, post, prefix, operation, type VovkRequest } from 'vovk';
2import { jsonSchema, type ModelMessage, streamText, tool, convertToModelMessages, UIMessage } from 'ai';
3import { openai } from '@ai-sdk/openai';
4import { UserZodRPC } from 'vovk-client';
5
6@prefix('ai-sdk')
7export default class AiSdkController {
8 @operation({
9 summary: 'Vercel AI SDK',
10 description:
11 'Uses [@ai-sdk/openai](https://www.npmjs.com/package/@ai-sdk/openai) and ai packages to chat with an AI model',
12 })
13 @post('chat')
14 static async chat(req: VovkRequest<{ messages: UIMessage[] }>) {
15 const { messages } = await req.json();
16 const LIMIT = 5;
17
18 if (messages.filter(({ role }) => role === 'user').length > LIMIT) {
19 throw new HttpException(HttpStatus.BAD_REQUEST, `You can only send ${LIMIT} messages at a time`);
20 }
21
22 return streamText({
23 model: openai('gpt-5-nano'),
24 system: 'You are a helpful assistant.',
25 messages: convertToModelMessages(messages),
26 }).toUIMessageStreamResponse();
27 }
28
29 @operation({
30 summary: 'Vercel AI SDK with Function Calling',
31 description:
32 'Uses [@ai-sdk/openai](https://www.npmjs.com/package/@ai-sdk/openai) and ai packages to call a function',
33 })
34 @post('function-calling')
35 static async functionCalling(req: VovkRequest<{ messages: UIMessage[] }>) {
36 const { messages } = await req.json();
37 const LIMIT = 5;
38 const { tools: llmTools } = createLLMTools({
39 modules: { UserZodRPC },
40 onExecute: (d) => console.log('Success', d),
41 onError: (e) => console.error('Error', e),
42 });
43
44 if (messages.filter(({ role }) => role === 'user').length > LIMIT) {
45 throw new HttpException(HttpStatus.BAD_REQUEST, `You can only send ${LIMIT} messages at a time`);
46 }
47
48 const tools = Object.fromEntries(
49 llmTools.map(({ name, execute, description, parameters }) => [
50 name,
51 tool<KnownAny, KnownAny>({
52 execute: async (args, { toolCallId }) => {
53 return execute(args, { toolCallId });
54 },
55 description,
56 inputSchema: jsonSchema(parameters as KnownAny),
57 }),
58 ])
59 );
60
61 return streamText({
62 model: openai('gpt-5-nano'),
63 // toolCallStreaming: true,
64 system:
65 'You are a helpful assistant. Always provide a clear confirmation message after executing any function. Explain what was done and what the results were after the user request is executed.',
66 messages: convertToModelMessages(messages),
67 tools,
68 onError: (e) => console.error('streamText error', e),
69 onFinish: ({ finishReason }) => {
70 if (finishReason === 'tool-calls') {
71 console.log('Tool calls finished');
72 }
73 },
74 }).toUIMessageStreamResponse();
75 }
76}
1'use client';
2import { useChat } from '@ai-sdk/react';
3import { DefaultChatTransport } from 'ai';
4import { useState } from 'react';
5
/**
 * Minimal chat UI: renders the message history from the /api/ai-sdk/chat
 * stream and submits new user messages on form submit.
 */
export default function Page() {
  // Controlled value of the message input field.
  const [input, setInput] = useState('');

  const { messages, sendMessage, error, status } = useChat({
    transport: new DefaultChatTransport({
      api: '/api/ai-sdk/chat',
    }),
  });

  // Send the trimmed-nonempty input as a user message and clear the field.
  const handleSubmit = (e: React.FormEvent) => {
    e.preventDefault();
    if (input.trim()) {
      sendMessage({ text: input });
      setInput('');
    }
  };

  return (
    <form onSubmit={handleSubmit}>
      {messages.map((message) => (
        <div key={message.id}>
          {message.role === 'assistant' ? '🤖' : '👤'}{' '}
          {message.parts.map((part, partIndex) => (
            <span key={partIndex}>{part.type === 'text' ? part.text : ''}</span>
          ))}
        </div>
      ))}
      {error && <div>❌ {error.message}</div>}
      <div className="input-group">
        <input type="text" placeholder="Send a message..." value={input} onChange={(e) => setInput(e.target.value)} />
        {/* Fix: `status` was destructured but never used — disable Send while a
            request is in flight so users cannot double-submit mid-stream. */}
        <button disabled={status === 'submitted' || status === 'streaming'}>Send</button>
      </div>
    </form>
  );
}
1'use client';
2import { useChat } from '@ai-sdk/react';
3import { DefaultChatTransport } from 'ai';
4import { useState } from 'react';
5
/**
 * Minimal chat UI: renders the message history from the /api/ai-sdk/chat
 * stream and submits new user messages on form submit.
 */
export default function Page() {
  // Controlled value of the message input field.
  const [input, setInput] = useState('');

  const { messages, sendMessage, error, status } = useChat({
    transport: new DefaultChatTransport({
      api: '/api/ai-sdk/chat',
    }),
  });

  // Send the trimmed-nonempty input as a user message and clear the field.
  const handleSubmit = (e: React.FormEvent) => {
    e.preventDefault();
    if (input.trim()) {
      sendMessage({ text: input });
      setInput('');
    }
  };

  return (
    <form onSubmit={handleSubmit}>
      {messages.map((message) => (
        <div key={message.id}>
          {message.role === 'assistant' ? '🤖' : '👤'}{' '}
          {message.parts.map((part, partIndex) => (
            <span key={partIndex}>{part.type === 'text' ? part.text : ''}</span>
          ))}
        </div>
      ))}
      {error && <div>❌ {error.message}</div>}
      <div className="input-group">
        <input type="text" placeholder="Send a message..." value={input} onChange={(e) => setInput(e.target.value)} />
        {/* Fix: `status` was destructured but never used — disable Send while a
            request is in flight so users cannot double-submit mid-stream. */}
        <button disabled={status === 'submitted' || status === 'streaming'}>Send</button>
      </div>
    </form>
  );
}
1'use client';
2import { useChat } from '@ai-sdk/react';
3import { DefaultChatTransport } from 'ai';
4import { useState } from 'react';
5
/**
 * Minimal chat UI: renders the message history from the /api/ai-sdk/chat
 * stream and submits new user messages on form submit.
 */
export default function Page() {
  // Controlled value of the message input field.
  const [input, setInput] = useState('');

  const { messages, sendMessage, error, status } = useChat({
    transport: new DefaultChatTransport({
      api: '/api/ai-sdk/chat',
    }),
  });

  // Send the trimmed-nonempty input as a user message and clear the field.
  const handleSubmit = (e: React.FormEvent) => {
    e.preventDefault();
    if (input.trim()) {
      sendMessage({ text: input });
      setInput('');
    }
  };

  return (
    <form onSubmit={handleSubmit}>
      {messages.map((message) => (
        <div key={message.id}>
          {message.role === 'assistant' ? '🤖' : '👤'}{' '}
          {message.parts.map((part, partIndex) => (
            <span key={partIndex}>{part.type === 'text' ? part.text : ''}</span>
          ))}
        </div>
      ))}
      {error && <div>❌ {error.message}</div>}
      <div className="input-group">
        <input type="text" placeholder="Send a message..." value={input} onChange={(e) => setInput(e.target.value)} />
        {/* Fix: `status` was destructured but never used — disable Send while a
            request is in flight so users cannot double-submit mid-stream. */}
        <button disabled={status === 'submitted' || status === 'streaming'}>Send</button>
      </div>
    </form>
  );
}
1'use client';
2import { useChat } from '@ai-sdk/react';
3import { DefaultChatTransport } from 'ai';
4import { useState } from 'react';
5
/**
 * Minimal chat UI: renders the message history from the /api/ai-sdk/chat
 * stream and submits new user messages on form submit.
 */
export default function Page() {
  // Controlled value of the message input field.
  const [input, setInput] = useState('');

  const { messages, sendMessage, error, status } = useChat({
    transport: new DefaultChatTransport({
      api: '/api/ai-sdk/chat',
    }),
  });

  // Send the trimmed-nonempty input as a user message and clear the field.
  const handleSubmit = (e: React.FormEvent) => {
    e.preventDefault();
    if (input.trim()) {
      sendMessage({ text: input });
      setInput('');
    }
  };

  return (
    <form onSubmit={handleSubmit}>
      {messages.map((message) => (
        <div key={message.id}>
          {message.role === 'assistant' ? '🤖' : '👤'}{' '}
          {message.parts.map((part, partIndex) => (
            <span key={partIndex}>{part.type === 'text' ? part.text : ''}</span>
          ))}
        </div>
      ))}
      {error && <div>❌ {error.message}</div>}
      <div className="input-group">
        <input type="text" placeholder="Send a message..." value={input} onChange={(e) => setInput(e.target.value)} />
        {/* Fix: `status` was destructured but never used — disable Send while a
            request is in flight so users cannot double-submit mid-stream. */}
        <button disabled={status === 'submitted' || status === 'streaming'}>Send</button>
      </div>
    </form>
  );
}
Last updated on