Tools & Function Calls

Tools enable models to invoke external functions with structured arguments, allowing AI applications to interact with APIs, databases, and real-world systems. Iris provides a unified tool-calling interface across all providers that support function calling. Common use cases include:

  • External data access: Fetch real-time information (weather, stock prices, search results)
  • System integration: Interact with databases, APIs, and internal services
  • Structured output: Get predictable, typed responses for downstream processing
  • Agent capabilities: Build autonomous agents that take actions
  • Reasoning augmentation: Let models use calculators, code interpreters, etc.
Provider     Tool Calling    Parallel Tools    Tool Choice    Streaming Tools
OpenAI       ✓               ✓                 ✓              ✓
Anthropic    ✓               ✓                 ✓              ✓
Gemini       ✓               ✓                 ✓              ✓
xAI          ✓               ✓                 ✓              ✓
Ollama       ✓               ✓                 -               -
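
Because the interface is unified, the same core.Tool definitions work unchanged when you switch providers. A minimal sketch; the anthropic import path and constructor are assumptions modeled on the openai provider used in the examples below:

    // Assumed import paths, modeled on the openai provider shown later:
    //   "github.com/petal-labs/iris/providers/openai"
    //   "github.com/petal-labs/iris/providers/anthropic" (assumption)
    client := core.NewClient(openai.New(os.Getenv("OPENAI_API_KEY")))
    // client := core.NewClient(anthropic.New(os.Getenv("ANTHROPIC_API_KEY"))) // assumed analogous constructor

    // Either way, tools are attached the same way:
    //   client.Chat(model).Tools(weatherTool).GetResponse(ctx)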

Tools are defined using the core.Tool struct with JSON Schema for parameters:

package main

import (
    "context"
    "fmt"
    "os"

    "github.com/petal-labs/iris/core"
    "github.com/petal-labs/iris/providers/openai"
)

func main() {
    provider := openai.New(os.Getenv("OPENAI_API_KEY"))
    client := core.NewClient(provider)

    // Define a weather tool
    weatherTool := core.Tool{
        Name:        "get_weather",
        Description: "Get the current weather for a location",
        Parameters: core.ToolParameters{
            Type: "object",
            Properties: map[string]core.Property{
                "location": {
                    Type:        "string",
                    Description: "City name, e.g., 'San Francisco, CA'",
                },
                "unit": {
                    Type:        "string",
                    Enum:        []string{"celsius", "fahrenheit"},
                    Description: "Temperature unit",
                },
            },
            Required: []string{"location"},
        },
    }

    resp, err := client.Chat("gpt-4o").
        System("You are a helpful assistant with access to weather data.").
        User("What's the weather like in Tokyo?").
        Tools(weatherTool).
        GetResponse(context.Background())
    if err != nil {
        panic(err)
    }

    // Check for tool calls
    if len(resp.ToolCalls) > 0 {
        for _, tc := range resp.ToolCalls {
            fmt.Printf("Tool called: %s\n", tc.Name)
            fmt.Printf("Arguments: %s\n", string(tc.Arguments))
        }
    } else {
        fmt.Println(resp.Output)
    }
}
Each parameter is described with core.Property. The common JSON Schema types are supported:

// String property
"name": {
    Type:        "string",
    Description: "The user's full name",
}

// String with enum
"status": {
    Type:        "string",
    Enum:        []string{"pending", "approved", "rejected"},
    Description: "Application status",
}

// Number property
"amount": {
    Type:        "number",
    Description: "Transaction amount in dollars",
}

// Integer property
"count": {
    Type:        "integer",
    Description: "Number of items",
}

// Boolean property
"active": {
    Type:        "boolean",
    Description: "Whether the account is active",
}

// Array property
"tags": {
    Type: "array",
    Items: &core.Property{
        Type: "string",
    },
    Description: "List of tags",
}

// Nested object
"address": {
    Type: "object",
    Properties: map[string]core.Property{
        "street": {Type: "string"},
        "city":   {Type: "string"},
        "zip":    {Type: "string"},
    },
    Required:    []string{"street", "city"},
    Description: "Shipping address",
}
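
These pieces compose into a single parameter schema. A sketch combining several of the property shapes above into one hypothetical create_order tool:

createOrderTool := core.Tool{
    Name:        "create_order",
    Description: "Create an order for a customer (illustrative example)",
    Parameters: core.ToolParameters{
        Type: "object",
        Properties: map[string]core.Property{
            "name":   {Type: "string", Description: "The user's full name"},
            "amount": {Type: "number", Description: "Transaction amount in dollars"},
            "tags": {
                Type:        "array",
                Items:       &core.Property{Type: "string"},
                Description: "List of tags",
            },
            "address": {
                Type: "object",
                Properties: map[string]core.Property{
                    "street": {Type: "string"},
                    "city":   {Type: "string"},
                },
                Required:    []string{"street", "city"},
                Description: "Shipping address",
            },
        },
        Required: []string{"name", "amount"},
    },
}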

Register multiple tools for complex applications:

weatherTool := core.Tool{
    Name:        "get_weather",
    Description: "Get current weather for a location",
    Parameters:  weatherParams, // the parameter schema from the previous example
}

searchTool := core.Tool{
    Name:        "search_web",
    Description: "Search the web for information",
    Parameters: core.ToolParameters{
        Type: "object",
        Properties: map[string]core.Property{
            "query": {
                Type:        "string",
                Description: "Search query",
            },
            "num_results": {
                Type:        "integer",
                Description: "Number of results to return (1-10)",
            },
        },
        Required: []string{"query"},
    },
}

calculatorTool := core.Tool{
    Name:        "calculate",
    Description: "Perform mathematical calculations",
    Parameters: core.ToolParameters{
        Type: "object",
        Properties: map[string]core.Property{
            "expression": {
                Type:        "string",
                Description: "Mathematical expression, e.g., '(5 + 3) * 2'",
            },
        },
        Required: []string{"expression"},
    },
}

resp, err := client.Chat("gpt-4o").
    User("What's the weather in NYC, and what is 15% of $85?").
    Tools(weatherTool, searchTool, calculatorTool).
    GetResponse(ctx)
When the model requests tools, dispatch on the tool name and execute each call:

type ToolResult struct {
    ToolCallID string
    Content    string
}

func handleToolCalls(toolCalls []core.ToolCall) []ToolResult {
    results := make([]ToolResult, len(toolCalls))
    for i, tc := range toolCalls {
        var result string
        switch tc.Name {
        case "get_weather":
            result = handleWeather(string(tc.Arguments))
        case "search_web":
            result = handleSearch(string(tc.Arguments))
        case "calculate":
            result = handleCalculation(string(tc.Arguments))
        default:
            result = fmt.Sprintf("Unknown tool: %s", tc.Name)
        }
        results[i] = ToolResult{
            ToolCallID: tc.ID,
            Content:    result,
        }
    }
    return results
}

func handleWeather(argsJSON string) string {
    var args struct {
        Location string `json:"location"`
        Unit     string `json:"unit"`
    }
    if err := json.Unmarshal([]byte(argsJSON), &args); err != nil {
        return fmt.Sprintf("Error parsing arguments: %v", err)
    }
    // Call your weather API
    // ...
    return fmt.Sprintf("Weather in %s: 72°F, partly cloudy", args.Location)
}

func handleSearch(argsJSON string) string {
    var args struct {
        Query      string `json:"query"`
        NumResults int    `json:"num_results"`
    }
    if err := json.Unmarshal([]byte(argsJSON), &args); err != nil {
        return fmt.Sprintf("Error parsing arguments: %v", err)
    }
    // Perform search
    // ...
    return "Search results: ..."
}

func handleCalculation(argsJSON string) string {
    var args struct {
        Expression string `json:"expression"`
    }
    if err := json.Unmarshal([]byte(argsJSON), &args); err != nil {
        return fmt.Sprintf("Error parsing arguments: %v", err)
    }
    // Evaluate expression safely
    // ...
    return "Result: 12.75"
}
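
For example, given resp from an earlier GetResponse call, the dispatcher can be applied directly to the returned tool calls (a small illustrative sketch):

if len(resp.ToolCalls) > 0 {
    results := handleToolCalls(resp.ToolCalls)
    for _, r := range results {
        fmt.Printf("%s -> %s\n", r.ToolCallID, r.Content)
    }
}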

Create a cleaner interface using generics:

type ToolHandler[T any] interface {
    Name() string
    Description() string
    Parameters() core.ToolParameters
    Execute(ctx context.Context, args T) (string, error)
}

type WeatherArgs struct {
    Location string `json:"location"`
    Unit     string `json:"unit"`
}

type WeatherHandler struct{}

func (WeatherHandler) Name() string        { return "get_weather" }
func (WeatherHandler) Description() string { return "Get current weather" }

func (WeatherHandler) Parameters() core.ToolParameters {
    return core.ToolParameters{
        Type: "object",
        Properties: map[string]core.Property{
            "location": {Type: "string", Description: "City name"},
            "unit":     {Type: "string", Enum: []string{"celsius", "fahrenheit"}},
        },
        Required: []string{"location"},
    }
}

func (WeatherHandler) Execute(ctx context.Context, args WeatherArgs) (string, error) {
    // Implement weather API call
    return fmt.Sprintf("Weather in %s: 72°F", args.Location), nil
}

// Tool registry
type ToolRegistry struct {
    tools    []core.Tool
    handlers map[string]func(context.Context, string) (string, error)
}

func NewToolRegistry() *ToolRegistry {
    return &ToolRegistry{
        handlers: make(map[string]func(context.Context, string) (string, error)),
    }
}

func Register[T any](r *ToolRegistry, h ToolHandler[T]) {
    r.tools = append(r.tools, core.Tool{
        Name:        h.Name(),
        Description: h.Description(),
        Parameters:  h.Parameters(),
    })
    r.handlers[h.Name()] = func(ctx context.Context, argsJSON string) (string, error) {
        var args T
        if err := json.Unmarshal([]byte(argsJSON), &args); err != nil {
            return "", err
        }
        return h.Execute(ctx, args)
    }
}

func (r *ToolRegistry) Tools() []core.Tool {
    return r.tools
}

func (r *ToolRegistry) Execute(ctx context.Context, name, argsJSON string) (string, error) {
    handler, ok := r.handlers[name]
    if !ok {
        return "", fmt.Errorf("unknown tool: %s", name)
    }
    return handler(ctx, argsJSON)
}
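
Putting the registry to work might look like this. The explicit type argument in Register[WeatherArgs] is required because Go cannot infer it from the handler's method set; client and ctx are assumed to exist as in the earlier examples:

registry := NewToolRegistry()
Register[WeatherArgs](registry, WeatherHandler{})

resp, err := client.Chat("gpt-4o").
    User("What's the weather in Berlin?").
    Tools(registry.Tools()...).
    GetResponse(ctx)
if err != nil {
    panic(err)
}

for _, tc := range resp.ToolCalls {
    output, err := registry.Execute(ctx, tc.Name, string(tc.Arguments))
    if err != nil {
        output = fmt.Sprintf("Error: %v", err)
    }
    fmt.Printf("%s -> %s\n", tc.Name, output)
}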

The complete tool calling flow requires multiple turns:

  1. User request → Model decides to call tool(s)
  2. Tool execution → Your code runs the tools
  3. Tool results → Model incorporates results into response
func ChatWithTools(ctx context.Context, client *core.Client, prompt string, tools []core.Tool) (string, error) {
    // First turn: user message
    resp, err := client.Chat("gpt-4o").
        System("You are a helpful assistant.").
        User(prompt).
        Tools(tools...).
        GetResponse(ctx)
    if err != nil {
        return "", err
    }

    // If no tool calls, return the response directly
    if len(resp.ToolCalls) == 0 {
        return resp.Output, nil
    }

    // Execute tool calls
    toolResults := make([]core.ToolResult, len(resp.ToolCalls))
    for i, tc := range resp.ToolCalls {
        result := executeToolCall(tc)
        toolResults[i] = core.ToolResult{
            ToolCallID: tc.ID,
            Content:    result,
        }
    }

    // Second turn: include tool results
    finalResp, err := client.Chat("gpt-4o").
        System("You are a helpful assistant.").
        User(prompt).
        Tools(tools...).
        AssistantWithToolCalls(resp.Output, resp.ToolCalls).
        ToolResults(toolResults...).
        GetResponse(ctx)
    if err != nil {
        return "", err
    }
    return finalResp.Output, nil
}
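
The executeToolCall helper above is a placeholder. One way to implement it, assuming the ToolRegistry from the previous section is available as a package-level variable named registry (an assumption for this sketch):

func executeToolCall(tc core.ToolCall) string {
    // Delegate to the registry; report errors as content so the model can adapt.
    output, err := registry.Execute(context.Background(), tc.Name, string(tc.Arguments))
    if err != nil {
        return fmt.Sprintf("Error executing %s: %v", tc.Name, err)
    }
    return output
}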

For complex tasks requiring multiple tool calls:

func AgentLoop(ctx context.Context, client *core.Client, prompt string, tools []core.Tool, maxIterations int) (string, error) {
    builder := client.Chat("gpt-4o").
        System("You are a helpful assistant. Use tools when needed.").
        User(prompt).
        Tools(tools...)

    for i := 0; i < maxIterations; i++ {
        resp, err := builder.GetResponse(ctx)
        if err != nil {
            return "", err
        }

        // No more tool calls - we're done
        if len(resp.ToolCalls) == 0 {
            return resp.Output, nil
        }

        // Execute all tool calls
        toolResults := make([]core.ToolResult, len(resp.ToolCalls))
        for j, tc := range resp.ToolCalls {
            result := executeToolCall(tc)
            toolResults[j] = core.ToolResult{
                ToolCallID: tc.ID,
                Content:    result,
            }
        }

        // Add the assistant response and tool results to the conversation
        builder = builder.
            AssistantWithToolCalls(resp.Output, resp.ToolCalls).
            ToolResults(toolResults...)
    }
    return "", fmt.Errorf("max iterations (%d) reached", maxIterations)
}
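
A typical call site, using the registry's tools and a small iteration budget (the prompt is only an example):

answer, err := AgentLoop(ctx, client, "Compare the weather in Tokyo and NYC, then compute the temperature difference.", registry.Tools(), 5)
if err != nil {
    panic(err)
}
fmt.Println(answer)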

Models can request multiple tools simultaneously. Handle them concurrently:

func handleToolCallsParallel(ctx context.Context, toolCalls []core.ToolCall) []core.ToolResult {
    results := make([]core.ToolResult, len(toolCalls))
    var wg sync.WaitGroup
    for i, tc := range toolCalls {
        wg.Add(1)
        go func(idx int, call core.ToolCall) {
            defer wg.Done()
            result, err := executeToolCallWithError(ctx, call)
            if err != nil {
                results[idx] = core.ToolResult{
                    ToolCallID: call.ID,
                    Content:    fmt.Sprintf("Error: %v", err),
                }
                return
            }
            results[idx] = core.ToolResult{
                ToolCallID: call.ID,
                Content:    result,
            }
        }(i, tc)
    }
    wg.Wait()
    return results
}

func executeToolCallWithError(ctx context.Context, tc core.ToolCall) (string, error) {
    switch tc.Name {
    case "get_weather":
        return fetchWeather(ctx, string(tc.Arguments))
    case "search_database":
        return searchDatabase(ctx, string(tc.Arguments))
    default:
        return "", fmt.Errorf("unknown tool: %s", tc.Name)
    }
}
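
If tools call rate-limited APIs, unbounded fan-out can be counterproductive. A sketch of the same pattern with a simple semaphore capping concurrency (the limit is arbitrary):

func handleToolCallsBounded(ctx context.Context, toolCalls []core.ToolCall, limit int) []core.ToolResult {
    results := make([]core.ToolResult, len(toolCalls))
    sem := make(chan struct{}, limit) // e.g., limit = 4
    var wg sync.WaitGroup
    for i, tc := range toolCalls {
        wg.Add(1)
        go func(idx int, call core.ToolCall) {
            defer wg.Done()
            sem <- struct{}{}        // acquire a slot
            defer func() { <-sem }() // release it
            result, err := executeToolCallWithError(ctx, call)
            if err != nil {
                result = fmt.Sprintf("Error: %v", err)
            }
            results[idx] = core.ToolResult{ToolCallID: call.ID, Content: result}
        }(i, tc)
    }
    wg.Wait()
    return results
}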

Control when and how the model uses tools:

// Auto (default): the model decides whether to use tools
resp, err := client.Chat("gpt-4o").
    User(prompt).
    Tools(tools...).
    ToolChoice(core.ToolChoiceAuto).
    GetResponse(ctx)

// None: disable tool calling for this request
resp, err = client.Chat("gpt-4o").
    User(prompt).
    Tools(tools...).
    ToolChoice(core.ToolChoiceNone).
    GetResponse(ctx)

// Required: force the model to use at least one tool
resp, err = client.Chat("gpt-4o").
    User(prompt).
    Tools(tools...).
    ToolChoice(core.ToolChoiceRequired).
    GetResponse(ctx)

// Specific tool: force a particular tool
resp, err = client.Chat("gpt-4o").
    User(prompt).
    Tools(tools...).
    ToolChoice(core.ToolChoiceFunction("get_weather")).
    GetResponse(ctx)

Tool calls can be streamed for real-time feedback:

stream, err := client.Chat("gpt-4o").
    System("You are a helpful assistant.").
    User("What's the weather in Tokyo and NYC?").
    Tools(weatherTool).
    GetStream(ctx)
if err != nil {
    return err
}

// Process streamed content
for chunk := range stream.Ch {
    if chunk.Content != "" {
        fmt.Print(chunk.Content)
    }
}
if err := <-stream.Err; err != nil {
    return err
}

// Get the final response with complete tool calls
final := <-stream.Final
if len(final.ToolCalls) > 0 {
    fmt.Println("\nTool calls requested:")
    for i, tc := range final.ToolCalls {
        fmt.Printf("  [%d] %s: %s\n", i, tc.Name, string(tc.Arguments))
    }
}
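
Once the stream has finished, the completed tool calls can be executed and fed back in a second turn, mirroring the non-streaming flow. A sketch, assuming final carries the same fields as a regular response (including Output):

if len(final.ToolCalls) > 0 {
    results := make([]core.ToolResult, len(final.ToolCalls))
    for i, tc := range final.ToolCalls {
        results[i] = core.ToolResult{ToolCallID: tc.ID, Content: executeToolCall(tc)}
    }

    followUp, err := client.Chat("gpt-4o").
        System("You are a helpful assistant.").
        User("What's the weather in Tokyo and NYC?").
        Tools(weatherTool).
        AssistantWithToolCalls(final.Output, final.ToolCalls).
        ToolResults(results...).
        GetResponse(ctx)
    if err != nil {
        return err
    }
    fmt.Println(followUp.Output)
}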

Some providers offer built-in tools that don’t require custom implementation.

// Web search (OpenAI)
resp, err := client.Chat("gpt-4o").
    User("What are the latest AI news today?").
    WebSearch().
    GetResponse(ctx)
fmt.Println(resp.Output)

// Code interpreter (OpenAI)
resp, err = client.Chat("gpt-4o").
    User("Calculate the first 20 Fibonacci numbers and plot them.").
    CodeInterpreter().
    GetResponse(ctx)

// Computer use (Anthropic)
resp, err = client.Chat("claude-3-5-sonnet").
    User("Open the browser and search for weather.").
    ComputerUse().
    GetResponse(ctx)
Tool calling is the foundation for agent patterns. A ReAct-style agent interleaves reasoning with tool use in a Thought-Action-Observation loop:

type ReActAgent struct {
    client *core.Client
    tools  []core.Tool
}

func (a *ReActAgent) Run(ctx context.Context, task string) (string, error) {
    systemPrompt := `You are a helpful assistant that uses a Thought-Action-Observation cycle.
For each step:
1. Thought: Reason about what to do next
2. Action: Use a tool if needed, or provide the final answer
3. Observation: Review tool results
Always explain your reasoning before taking action.`

    builder := a.client.Chat("gpt-4o").
        System(systemPrompt).
        User(task).
        Tools(a.tools...)

    maxSteps := 10
    for step := 0; step < maxSteps; step++ {
        resp, err := builder.GetResponse(ctx)
        if err != nil {
            return "", err
        }

        // Check for a final answer
        if len(resp.ToolCalls) == 0 {
            return resp.Output, nil
        }

        // Execute tools
        results := make([]core.ToolResult, len(resp.ToolCalls))
        for i, tc := range resp.ToolCalls {
            observation := a.executeTool(ctx, tc)
            results[i] = core.ToolResult{
                ToolCallID: tc.ID,
                Content:    observation,
            }
        }

        // Continue the conversation
        builder = builder.
            AssistantWithToolCalls(resp.Output, resp.ToolCalls).
            ToolResults(results...)
    }
    return "", fmt.Errorf("max steps reached without final answer")
}

func (a *ReActAgent) executeTool(ctx context.Context, tc core.ToolCall) string {
    // Execute the tool and return an observation
    switch tc.Name {
    case "search":
        return a.search(string(tc.Arguments))
    case "calculate":
        return a.calculate(string(tc.Arguments))
    default:
        return "Unknown tool"
    }
}
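
Running the agent is then a one-liner; the search and calculate tools and the a.search / a.calculate helpers are assumed to be defined elsewhere:

agent := &ReActAgent{
    client: client,
    tools:  []core.Tool{searchTool, calculatorTool},
}
answer, err := agent.Run(ctx, "What is the population of Tokyo divided by the population of NYC?")
if err != nil {
    panic(err)
}
fmt.Println(answer)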
A plan-and-execute agent first asks the model for a plan, then works through it with tools:

type PlanExecuteAgent struct {
    client *core.Client
    tools  []core.Tool
}

func (a *PlanExecuteAgent) Run(ctx context.Context, task string) (string, error) {
    // Step 1: Create a plan
    planResp, err := a.client.Chat("gpt-4o").
        System(`You are a planning assistant. Given a task, create a step-by-step plan.
Output a numbered list of steps. Each step should be actionable.`).
        User(task).
        GetResponse(ctx)
    if err != nil {
        return "", err
    }
    plan := planResp.Output
    fmt.Printf("Plan:\n%s\n\n", plan)

    // Step 2: Execute each step
    execPrompt := "Plan: " + plan + "\n\nExecuting steps:\n"
    builder := a.client.Chat("gpt-4o").
        System("You are an executor. Execute each step of the plan using available tools.").
        User(execPrompt).
        Tools(a.tools...)

    for i := 0; i < 20; i++ { // max iterations
        resp, err := builder.GetResponse(ctx)
        if err != nil {
            return "", err
        }

        // No more tool calls means we're done
        if len(resp.ToolCalls) == 0 {
            return resp.Output, nil
        }

        // Execute tools
        results := make([]core.ToolResult, len(resp.ToolCalls))
        for j, tc := range resp.ToolCalls {
            result := a.executeTool(ctx, tc)
            results[j] = core.ToolResult{
                ToolCallID: tc.ID,
                Content:    result,
            }
        }

        builder = builder.
            AssistantWithToolCalls(resp.Output, resp.ToolCalls).
            ToolResults(results...)
    }
    return "", fmt.Errorf("execution incomplete")
}

Return errors in a way the model can understand:

func executeTool(tc core.ToolCall) core.ToolResult {
    result, err := runTool(tc)
    if err != nil {
        // Return the error message as content so the model can adapt
        return core.ToolResult{
            ToolCallID: tc.ID,
            Content:    fmt.Sprintf("Error executing %s: %v", tc.Name, err),
            IsError:    true,
        }
    }
    return core.ToolResult{
        ToolCallID: tc.ID,
        Content:    result,
    }
}

Validate arguments before doing any work, and return specific error messages:

func handleWeather(argsJSON string) (string, error) {
    var args struct {
        Location string `json:"location"`
    }
    if err := json.Unmarshal([]byte(argsJSON), &args); err != nil {
        return "", fmt.Errorf("invalid arguments: %w", err)
    }
    if args.Location == "" {
        return "", fmt.Errorf("location is required")
    }
    // Fetch weather...
    return result, nil
}

Bound tool execution time so a slow tool cannot stall the whole turn:

func executeToolWithTimeout(ctx context.Context, tc core.ToolCall) core.ToolResult {
    // Create a timeout context for tool execution
    toolCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
    defer cancel()

    result, err := runToolAsync(toolCtx, tc)
    if errors.Is(err, context.DeadlineExceeded) {
        return core.ToolResult{
            ToolCallID: tc.ID,
            Content:    fmt.Sprintf("Tool %s timed out after 10s", tc.Name),
            IsError:    true,
        }
    }
    if err != nil {
        return core.ToolResult{
            ToolCallID: tc.ID,
            Content:    fmt.Sprintf("Error: %v", err),
            IsError:    true,
        }
    }
    return core.ToolResult{
        ToolCallID: tc.ID,
        Content:    result,
    }
}
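
For flaky tools (network hiccups, rate limits), a small retry wrapper can sit alongside the timeout handling above. An illustrative sketch reusing the runToolAsync placeholder, with arbitrary backoff values:

func executeToolWithRetry(ctx context.Context, tc core.ToolCall, attempts int) core.ToolResult {
    var lastErr error
    for i := 0; i < attempts; i++ {
        result, err := runToolAsync(ctx, tc)
        if err == nil {
            return core.ToolResult{ToolCallID: tc.ID, Content: result}
        }
        lastErr = err

        // Back off a little more on each attempt, but respect cancellation.
        select {
        case <-ctx.Done():
            return core.ToolResult{
                ToolCallID: tc.ID,
                Content:    fmt.Sprintf("Error: %v", ctx.Err()),
                IsError:    true,
            }
        case <-time.After(time.Duration(i+1) * 500 * time.Millisecond):
        }
    }
    return core.ToolResult{
        ToolCallID: tc.ID,
        Content:    fmt.Sprintf("Error after %d attempts: %v", attempts, lastErr),
        IsError:    true,
    }
}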
Keep these practices in mind when designing tools.

Write specific, actionable descriptions so the model knows when to use a tool:

// Good - specific and actionable
weatherTool := core.Tool{
    Name:        "get_weather",
    Description: "Get the current weather conditions including temperature, humidity, and conditions for a specific city. Use this when the user asks about weather or temperature.",
    // ...
}

// Bad - vague
weatherTool := core.Tool{
    Name:        "weather",
    Description: "Weather tool",
    // ...
}

Document defaults and limits in parameter descriptions:

"unit": {
    Type:        "string",
    Enum:        []string{"celsius", "fahrenheit"},
    Description: "Temperature unit. Defaults to celsius if not specified.",
}

"num_results": {
    Type:        "integer",
    Description: "Number of search results to return. Defaults to 5 if not specified. Maximum is 20.",
}

Validate and normalize arguments before executing:

func validateSearchArgs(args *SearchArgs) error {
    if args.Query == "" {
        return errors.New("query cannot be empty")
    }
    if args.NumResults < 1 {
        args.NumResults = 5 // Default
    }
    if args.NumResults > 20 {
        args.NumResults = 20 // Cap
    }
    return nil
}

Return results in a form the model can use:

// Good - structured, parseable
return fmt.Sprintf(`{"location": %q, "temp_f": %d, "conditions": %q}`,
    args.Location, 72, "partly cloudy")

// Also good - clear text
return fmt.Sprintf("Weather in %s: %d°F, %s",
    args.Location, 72, "partly cloudy")

Keep each tool narrowly scoped:

// Good - specific tools
searchEmailTool := core.Tool{Name: "search_emails" /* ... */}
sendEmailTool := core.Tool{Name: "send_email" /* ... */}

// Bad - too broad
emailTool := core.Tool{
    Name:        "email",
    Description: "Do anything with emails",
    // ...
}