-
Notifications
You must be signed in to change notification settings - Fork 363
Add TS examples for function tools and agent as a tool #680
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 1 commit
c79f2f2
4847f67
f52610c
0d408ec
2e194ba
dd26857
b7e44a0
751f14b
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,72 @@ | ||
| import { | ||
| AgentTool, | ||
| InMemoryRunner, | ||
| LlmAgent, | ||
| } from '@google/adk'; | ||
|
|
||
| /** | ||
| * This example demonstrates how to use an agent as a tool. | ||
| */ | ||
| async function main() { | ||
| // Define the summarization agent that will be used as a tool | ||
| const summaryAgent = new LlmAgent({ | ||
| name: 'summary_agent', | ||
| model: 'gemini-2.5-flash', | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Should we stay consistent with python and use 'gemini-2.0-flash'? I can go either way on this. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. we should use 2.5 model because python library and code examples were created a while ago. |
||
| description: 'Agent to summarize text', | ||
| instruction: | ||
| 'You are an expert summarizer. Please read the following text and provide a concise summary.', | ||
| }); | ||
|
|
||
| // Define the main agent that uses the summarization agent as a tool. | ||
| // skipSummarization is set to true, so the main_agent will directly output | ||
| // the result from the summary_agent without further processing. | ||
| const mainAgent = new LlmAgent({ | ||
| name: 'main_agent', | ||
| model: 'gemini-2.5-flash', | ||
| instruction: | ||
| "You are a helpful assistant. When the user provides a text, use the 'summary_agent' tool to generate a summary. Always forward the user's message exactly as received to the 'summary_agent' tool, without modifying or summarizing it yourself. Present the response from the tool to the user.", | ||
| tools: [new AgentTool({agent: summaryAgent, skipSummarization: true})], | ||
| }); | ||
|
|
||
| const appName = 'agent-as-a-tool-app'; | ||
| const runner = new InMemoryRunner({agent: mainAgent, appName}); | ||
|
|
||
| const longText = `Quantum computing represents a fundamentally different approach to computation, | ||
| leveraging the bizarre principles of quantum mechanics to process information. Unlike classical computers | ||
| that rely on bits representing either 0 or 1, quantum computers use qubits which can exist in a state of superposition - effectively | ||
| being 0, 1, or a combination of both simultaneously. Furthermore, qubits can become entangled, | ||
| meaning their fates are intertwined regardless of distance, allowing for complex correlations. This parallelism and | ||
| interconnectedness grant quantum computers the potential to solve specific types of incredibly complex problems - such | ||
| as drug discovery, materials science, complex system optimization, and breaking certain types of cryptography - far | ||
| faster than even the most powerful classical supercomputers could ever achieve, although the technology is still largely in its developmental stages.`; | ||
|
|
||
| // Create the session before running the agent | ||
| await runner.sessionService.createSession({ | ||
| appName, | ||
| userId: 'user1', | ||
| sessionId: 'session1', | ||
| }); | ||
|
|
||
| // Run the agent with the long text to summarize | ||
| const events = await runner.runSync({ | ||
| userId: 'user1', | ||
| sessionId: 'session1', | ||
| newMessage: { | ||
| role: 'user', | ||
| parts: [{text: longText}], | ||
| }, | ||
| }); | ||
|
|
||
| // Print the final response from the agent | ||
| console.log('Agent Response:'); | ||
| for (const event of events) { | ||
| if (event.isFinalResponse() && event.content?.parts) { | ||
| const responsePart = event.content.parts.find(p => p.functionResponse); | ||
| if (responsePart && responsePart.functionResponse) { | ||
| console.log(responsePart.functionResponse.response); | ||
| } | ||
| } | ||
| } | ||
| } | ||
|
|
||
| main(); | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,68 @@ | ||
|
|
||
| import {Content, Part} from '@google/genai'; | ||
| import { | ||
| FunctionTool, | ||
| InMemoryRunner, | ||
| LlmAgent, | ||
| } from '@google/adk'; | ||
| import {z} from 'zod'; | ||
|
|
||
| // Define the function to get the stock price | ||
| async function getStockPrice({ticker}: {ticker: string}): Promise<Record<string, unknown>> { | ||
| console.log(`Getting stock price for ${ticker}`); | ||
| // In a real-world scenario, you would fetch the stock price from an API | ||
| const price = (Math.random() * 1000).toFixed(2); | ||
| return {price: `$${price}`}; | ||
| } | ||
|
|
||
| async function main() { | ||
| // Define the schema for the tool's parameters using Zod | ||
| const getStockPriceSchema = z.object({ | ||
| ticker: z.string().describe('The stock ticker symbol to look up.'), | ||
| }); | ||
|
|
||
| // Create a FunctionTool from the function and schema | ||
| const stockPriceTool = new FunctionTool({ | ||
| name: 'getStockPrice', | ||
| description: 'Gets the current price of a stock.', | ||
| parameters: getStockPriceSchema, | ||
| execute: getStockPrice, | ||
| }); | ||
|
|
||
| // Define the agent that will use the tool | ||
| const stockAgent = new LlmAgent({ | ||
| name: 'stock_agent', | ||
| model: 'gemini-2.5-flash', | ||
| instruction: 'You can get the stock price of a company.', | ||
| tools: [stockPriceTool], | ||
| }); | ||
|
|
||
| // Create a runner for the agent | ||
| const runner = new InMemoryRunner({agent: stockAgent}); | ||
|
|
||
| // Create a new session | ||
| const session = await runner.sessionService.createSession({ | ||
| appName: runner.appName, | ||
| userId: 'test-user', | ||
| }); | ||
|
|
||
| const userContent: Content = { | ||
| role: 'user', | ||
| parts: [{text: 'What is the stock price of GOOG?'},], | ||
| }; | ||
|
|
||
| // Run the agent and get the response | ||
| const response = await runner.runSync({ | ||
| userId: session.userId, | ||
| sessionId: session.id, | ||
| newMessage: userContent, | ||
| }); | ||
|
|
||
| // Print the final response from the agent | ||
| const finalResponse = response[response.length - 1]; | ||
| if (finalResponse?.content?.parts) { | ||
| console.log(finalResponse.content.parts.map((part: Part) => part.text).join('')); | ||
| } | ||
| } | ||
|
|
||
| main(); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Would it make sense to put these two examples in separate subdirectories? Also, shouldn't we include a
`tsconfig.json` and a `package.json` for them to make sure users can quickly run them? There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
We should create a subdirectory for each feature md file (e.g. function-tools.md), and within that subdirectory keep just the .ts files for each example. I checked the path structure for function-tools in adk-docs/examples/python/snippets: tools is a directory and function-tools is the subdirectory. There is no need to create one more layer of subdirectories for the individual example .ts files — does that make sense?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
And yes we agree we will add a tsconfig.json and a package.json for each feature so each sample code is self-contained.