Tool Middleware
Add logging, caching, and rate limiting. Middleware →
This example demonstrates how to build an autonomous agent using Iris v0.12.0’s tool calling capabilities with manual iteration, parallel tool execution, and conversation memory management.
A support agent that:
┌─────────────┐ ┌─────────────────────────────────────────┐│ User │────▶│ Agent Loop ││ Request │ │ ┌─────────────┐ ┌─────────────┐ │└─────────────┘ │ │ ChatBuilder │ │ ToolExecutor│ │ │ └──────┬──────┘ └──────┬──────┘ │ │ │ │ │ │ ┌────┴────────────────┴────┐ │ │ │ Parallel Execution │ │ │ │ with Goroutines │ │ │ └──────────┬───────────────┘ │ │ │ │ └───────────────┼────────────────────────┘ │ ┌────────────────────┼────────────────────┐ │ │ │ ▼ ▼ ▼ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │ Ticket DB │ │ Log Search │ │ Slack │ └──────────────┘ └──────────────┘ └──────────────┘

go get github.com/petal-labs/iris

iris keys set openai

package main
import ( "context" "encoding/json" "fmt" "log" "sync" "time"
"github.com/petal-labs/iris/core" "github.com/petal-labs/iris/providers/openai" "github.com/petal-labs/iris/tools")
const maxIterations = 10
func main() { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel()
// Initialize provider and client provider, err := openai.NewFromKeystore() if err != nil { log.Fatal(err) } client := core.NewClient(provider)
// Create tool registry with middleware registry := tools.NewRegistry( tools.WithRegistryMiddleware( tools.WithLogging(log.Default()), tools.WithTimeout(30*time.Second), ), ) registry.Register(createTicketLookupTool()) registry.Register(createTicketUpdateTool()) registry.Register(createLogSearchTool()) registry.Register(createSlackNotifyTool())
// Run the agent loop result, err := runAgent(ctx, client, registry, "Ticket 1042 is stuck in pending. Investigate and resolve.")
if err != nil { log.Fatal(err) }
fmt.Println("\n=== Final Response ===") fmt.Println(result)}
func runAgent(ctx context.Context, client *core.Client, registry *tools.Registry, request string) (string, error) { systemPrompt := `You are an expert support agent. Investigate issues and resolve them.Use lookup_ticket to get ticket details, search_logs for errors,update_ticket to change status, and notify_slack for alerts.`
// Build initial conversation builder := client.Chat("gpt-4o"). System(systemPrompt). User(request). Tools(registry.List()...)
var totalToolCalls int
for iteration := 0; iteration < maxIterations; iteration++ { fmt.Printf("🔄 Iteration %d starting (tools executed: %d)\n", iteration+1, totalToolCalls)
resp, err := builder.GetResponse(ctx) if err != nil { return "", fmt.Errorf("iteration %d failed: %w", iteration+1, err) }
// No tool calls means the agent is done if len(resp.ToolCalls) == 0 { fmt.Printf("\n📊 Agent completed: %d iterations, %d tool calls\n", iteration+1, totalToolCalls) return resp.Output, nil }
// Execute tool calls in parallel results := executeToolsParallel(ctx, registry, resp.ToolCalls) totalToolCalls += len(resp.ToolCalls)
// Build tool results for next iteration toolResults := make([]core.ToolResult, len(results)) for i, r := range results { toolResults[i] = core.ToolResult{ CallID: r.CallID, Content: r.Content, IsError: r.IsError, } }
// Continue conversation with tool results builder = client.Chat("gpt-4o"). System(systemPrompt). User(request). Tools(registry.List()...). ToolResults(toolResults...) }
return "", fmt.Errorf("max iterations (%d) reached", maxIterations)}
type toolResult struct { CallID string Content any IsError bool}
func executeToolsParallel(ctx context.Context, registry *tools.Registry, calls []core.ToolCall) []toolResult { results := make([]toolResult, len(calls)) var wg sync.WaitGroup
for i, call := range calls { wg.Add(1) go func(idx int, tc core.ToolCall) { defer wg.Done()
fmt.Printf(" 🔧 Calling %s...\n", tc.Name) start := time.Now()
result, err := registry.Execute(ctx, tc.Name, tc.Arguments)
if err != nil { fmt.Printf(" ❌ %s failed: %v\n", tc.Name, err) results[idx] = toolResult{ CallID: tc.ID, Content: fmt.Sprintf("Error: %v", err), IsError: true, } } else { fmt.Printf(" ✅ %s completed in %v\n", tc.Name, time.Since(start)) results[idx] = toolResult{ CallID: tc.ID, Content: result, } } }(i, call) }
wg.Wait() return results}
// Tool creation functionsfunc createTicketLookupTool() tools.Tool { return tools.NewTool("lookup_ticket", "Look up a support ticket by ID to get its current status and details", tools.ObjectSchema( tools.Property("ticket_id", tools.String("The ticket ID")), ).Required("ticket_id"), func(ctx context.Context, args json.RawMessage) (any, error) { var params struct { TicketID string `json:"ticket_id"` } if err := json.Unmarshal(args, ¶ms); err != nil { return nil, err } // Simulated lookup return map[string]any{ "id": params.TicketID, "title": "Payment processing failing", "status": "pending", "priority": "high", "assigned_to": "payments-team", }, nil }, )}
func createTicketUpdateTool() tools.Tool { return tools.NewTool("update_ticket", "Update a ticket's status or add a comment", tools.ObjectSchema( tools.Property("ticket_id", tools.String("The ticket ID")), tools.Property("status", tools.Enum("open", "in_progress", "resolved", "closed")), tools.Property("comment", tools.String("Comment to add")), ).Required("ticket_id"), func(ctx context.Context, args json.RawMessage) (any, error) { var params struct { TicketID string `json:"ticket_id"` Status string `json:"status"` Comment string `json:"comment"` } json.Unmarshal(args, ¶ms) return map[string]any{ "success": true, "message": fmt.Sprintf("Ticket %s updated to %s", params.TicketID, params.Status), }, nil }, )}
func createLogSearchTool() tools.Tool { return tools.NewTool("search_logs", "Search system logs for errors or specific patterns", tools.ObjectSchema( tools.Property("query", tools.String("Search query")), tools.Property("service", tools.String("Filter by service name")), tools.Property("level", tools.Enum("error", "warning", "info", "debug")), ).Required("query"), func(ctx context.Context, args json.RawMessage) (any, error) { // Simulated log search return []map[string]any{ {"level": "error", "service": "payment-gateway", "message": "Stripe API timeout"}, {"level": "error", "service": "payment-gateway", "message": "Circuit breaker opened"}, }, nil }, )}
func createSlackNotifyTool() tools.Tool { return tools.NewTool("notify_slack", "Send a notification to a Slack channel", tools.ObjectSchema( tools.Property("channel", tools.String("Slack channel")), tools.Property("message", tools.String("Message to send")), ).Required("channel", "message"), func(ctx context.Context, args json.RawMessage) (any, error) { var params struct { Channel string `json:"channel"` Message string `json:"message"` } json.Unmarshal(args, ¶ms) fmt.Printf("[Slack] %s: %s\n", params.Channel, params.Message) return map[string]any{"sent": true}, nil }, )}package main
import ( "context" "encoding/json" "fmt" "log" "sync" "time"
"github.com/petal-labs/iris/core" "github.com/petal-labs/iris/providers/openai" "github.com/petal-labs/iris/tools")
const maxIterations = 10
func main() { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel()
provider, _ := openai.NewFromKeystore() client := core.NewClient(provider)
registry := tools.NewRegistry() registry.Register(createTicketLookupTool()) registry.Register(createLogSearchTool()) registry.Register(createTicketUpdateTool()) registry.Register(createSlackNotifyTool())
// Create conversation with memory conv := core.NewConversation(client, "gpt-4o", core.WithSystemMessage(`You are an expert support agent. Investigate issues and resolve them.Use lookup_ticket to get ticket details, search_logs for errors,update_ticket to change status, and notify_slack for alerts.`), )
// Run agent with conversation memory result, err := runAgentWithMemory(ctx, conv, registry, "Ticket 1042 is stuck in pending. Investigate and resolve.")
if err != nil { log.Fatal(err) }
fmt.Println("\n=== Final Response ===") fmt.Println(result)
fmt.Printf("\n=== Conversation History (%d messages) ===\n", conv.MessageCount())}
func runAgentWithMemory(ctx context.Context, conv *core.Conversation, registry *tools.Registry, request string) (string, error) { // Initial request resp, err := conv.SendWithTools(ctx, request, registry.List()...) if err != nil { return "", err }
for iteration := 0; iteration < maxIterations; iteration++ { if len(resp.ToolCalls) == 0 { return resp.Output, nil }
// Execute tools results := executeToolsParallel(ctx, registry, resp.ToolCalls)
// Convert to ToolResults toolResults := make([]core.ToolResult, len(results)) for i, r := range results { toolResults[i] = core.ToolResult{ CallID: r.CallID, Content: r.Content, IsError: r.IsError, } }
// Continue conversation with tool results resp, err = conv.ContinueWithToolResults(ctx, toolResults, registry.List()...) if err != nil { return "", err } }
return "", fmt.Errorf("max iterations reached")}func runAgentWithStreaming(ctx context.Context, client *core.Client, registry *tools.Registry, request string) (string, error) { systemPrompt := `You are an expert support agent.`
builder := client.Chat("gpt-4o"). System(systemPrompt). User(request). Tools(registry.List()...)
for iteration := 0; iteration < maxIterations; iteration++ { // Use streaming for real-time output stream, err := builder.Stream(ctx) if err != nil { return "", err }
// Print tokens as they arrive for chunk := range stream.Ch { if chunk.Content != "" { fmt.Print(chunk.Content) } }
if err := <-stream.Err; err != nil { return "", err }
// Get final response with tool calls final := <-stream.Final
if len(final.ToolCalls) == 0 { fmt.Println() // Final newline return final.Output, nil }
// Execute tools fmt.Println("\n[Executing tools...]") results := executeToolsParallel(ctx, registry, final.ToolCalls)
// Build next iteration with tool results toolResults := make([]core.ToolResult, len(results)) for i, r := range results { toolResults[i] = core.ToolResult{ CallID: r.CallID, Content: r.Content, IsError: r.IsError, } }
builder = client.Chat("gpt-4o"). System(systemPrompt). User(request). Tools(registry.List()...). ToolResults(toolResults...) }
return "", fmt.Errorf("max iterations reached")}The v0.12.0 approach uses a manual iteration loop:
for iteration := 0; iteration < maxIterations; iteration++ { resp, err := builder.GetResponse(ctx) if err != nil { return "", err }
// No tool calls = agent is done if len(resp.ToolCalls) == 0 { return resp.Output, nil }
    // Execute tools and continue
    results := executeTools(ctx, resp.ToolCalls)
    builder = builder.ToolResults(results...)
}

Control the agent loop with these parameters:
| Parameter | Typical Value | Description |
|---|---|---|
| maxIterations | 10-15 | Maximum LLM calls before stopping |
| Tool timeout | 30s | Timeout per tool execution |
| Request timeout | 5min | Overall operation timeout |
| Parallel tools | true | Execute independent tools concurrently |
Execute tools concurrently for faster resolution:
func executeToolsParallel(ctx context.Context, registry *tools.Registry, calls []core.ToolCall) []toolResult { results := make([]toolResult, len(calls)) var wg sync.WaitGroup
for i, call := range calls { wg.Add(1) go func(idx int, tc core.ToolCall) { defer wg.Done() result, err := registry.Execute(ctx, tc.Name, tc.Arguments) results[idx] = toolResult{ CallID: tc.ID, Content: result, IsError: err != nil, } }(i, call) }
wg.Wait() return results}Add cross-cutting concerns to all tool executions:
registry := tools.NewRegistry(
    tools.WithRegistryMiddleware(
        tools.WithLogging(log.Default()),
        tools.WithTimeout(30*time.Second),
        tools.WithRetry(tools.DefaultRetryConfig()),
        tools.WithCircuitBreaker(tools.DefaultCircuitBreakerConfig()),
    ),
)

🔄 Iteration 1 starting (tools executed: 0)
  🔧 Calling lookup_ticket...
  ✅ lookup_ticket completed in 45ms
  🔧 Calling search_logs...
  ✅ search_logs completed in 120ms
🔄 Iteration 2 starting (tools executed: 2)
  🔧 Calling update_ticket...
  🔧 Calling notify_slack...
  ✅ update_ticket completed in 30ms
  ✅ notify_slack completed in 85ms
📊 Agent completed: 2 iterations, 4 tool calls
=== Final Response ===

I investigated ticket 1042 and found the root cause:
**Issue**: Payment processing failures since 2pm

**Root Cause**: Stripe API timeouts caused circuit breaker to open
**Actions Taken**:
1. Updated ticket status to "in_progress"
2. Notified #payments-oncall channel
**Recommendation**: Check Stripe status page and consider backup processor.

| Practice | Recommendation |
|---|---|
| Iteration limits | Set maxIterations to 10-15 to prevent infinite loops |
| Tool timeouts | Use middleware with shorter timeout than overall request |
| Parallel execution | Enable for independent tools, disable for ordered workflows |
| Error handling | Return errors to model so it can adapt strategy |
| Memory management | Use Conversation API for multi-turn interactions |
| Middleware | Add logging and metrics for observability |
Tool Middleware
Add logging, caching, and rate limiting. Middleware →
Memory & Conversations
Manage multi-turn conversations. Memory →
Tools Guide
Deep dive into tool definitions. Tools →