Concepts
Understand Iris’s architecture and building blocks. Read Concepts →
This guide walks you through installing Iris, configuring your first provider, and making your first LLM request. By the end, you’ll have a working setup ready for building AI applications.
Before installing Iris, ensure you have:
Verify your Go installation:
go version
# go version go1.22.0 darwin/arm64

Add Iris to your Go project:
go get github.com/petal-labs/iris

This installs the core SDK with all provider packages.
The CLI provides a command-line interface for quick experiments and prompt testing:
go install github.com/petal-labs/iris/cli/cmd/iris@latest

Verify the installation:
iris version
# iris v0.12.0

Let’s make a simple chat request with OpenAI. This example demonstrates the core workflow: create a provider, wrap it in a client, and use the fluent builder API.
package main
import ( "context" "fmt" "os"
"github.com/petal-labs/iris/core" "github.com/petal-labs/iris/providers/openai")
func main() { // Create provider with API key from environment provider := openai.New(os.Getenv("OPENAI_API_KEY"))
// Wrap in client for retry logic and telemetry client := core.NewClient(provider)
// Build and execute request resp, err := client.Chat("gpt-4o"). System("You are a helpful assistant."). User("What is the capital of France?"). Temperature(0.7). GetResponse(context.Background())
if err != nil { panic(err) }
	fmt.Println(resp.Output)
}

package main
import ( "context" "fmt" "os"
"github.com/petal-labs/iris/core" "github.com/petal-labs/iris/providers/anthropic")
func main() { // Anthropic requires both API key and model specification provider := anthropic.New(os.Getenv("ANTHROPIC_API_KEY")) client := core.NewClient(provider)
resp, err := client.Chat("claude-3-5-sonnet-20241022"). System("You are a helpful assistant."). User("What is the capital of France?"). Temperature(0.7). GetResponse(context.Background())
if err != nil { panic(err) }
	fmt.Println(resp.Output)
}

package main
import ( "context" "fmt"
"github.com/petal-labs/iris/core" "github.com/petal-labs/iris/providers/ollama")
func main() { // Ollama runs locally, no API key needed provider := ollama.New("http://localhost:11434") client := core.NewClient(provider)
resp, err := client.Chat("llama3.2"). System("You are a helpful assistant."). User("What is the capital of France?"). Temperature(0.7). GetResponse(context.Background())
if err != nil { panic(err) }
	fmt.Println(resp.Output)
}

Run your code:
export OPENAI_API_KEY=sk-...
go run main.go
# The capital of France is Paris.

For chat applications and real-time interfaces, streaming delivers tokens as they’re generated:
package main
import ( "context" "fmt" "os"
"github.com/petal-labs/iris/core" "github.com/petal-labs/iris/providers/openai")
func main() { provider := openai.New(os.Getenv("OPENAI_API_KEY")) client := core.NewClient(provider)
// GetStream returns immediately with channels stream, err := client.Chat("gpt-4o"). System("You are a helpful assistant."). User("Write a haiku about programming."). GetStream(context.Background())
if err != nil { panic(err) }
// Process chunks as they arrive for chunk := range stream.Ch { fmt.Print(chunk.Content) // Print without newline for streaming effect } fmt.Println() // Final newline
// Check for errors after stream completes if err := <-stream.Err; err != nil { panic(err) }
	// Optional: get the final aggregated response
	final := <-stream.Final
	fmt.Printf("\nTotal tokens: %d\n", final.Usage.TotalTokens)
}

Tools let models invoke functions with structured arguments. Here’s a simple example:
package main
import ( "context" "encoding/json" "fmt" "os"
"github.com/petal-labs/iris/core" "github.com/petal-labs/iris/providers/openai")
func main() { provider := openai.New(os.Getenv("OPENAI_API_KEY")) client := core.NewClient(provider)
// Define a tool weatherTool := core.Tool{ Name: "get_weather", Description: "Get the current weather for a location", Parameters: core.ToolParameters{ Type: "object", Properties: map[string]core.Property{ "location": { Type: "string", Description: "City name, e.g., 'San Francisco, CA'", }, "unit": { Type: "string", Enum: []string{"celsius", "fahrenheit"}, Description: "Temperature unit", }, }, Required: []string{"location"}, }, }
// Request with tool resp, err := client.Chat("gpt-4o"). System("You are a helpful weather assistant."). User("What's the weather in Tokyo?"). Tools(weatherTool). GetResponse(context.Background())
if err != nil { panic(err) }
// Check if model wants to call a tool if len(resp.ToolCalls) > 0 { tc := resp.ToolCalls[0] fmt.Printf("Tool called: %s\n", tc.Name)
var args map[string]string json.Unmarshal(tc.Arguments, &args) fmt.Printf("Arguments: %+v\n", args)
		// In a real app, you'd execute the tool and continue the conversation
	}
}

See the Tools Guide for complete tool calling patterns including multi-turn conversations with tool results.
Iris provides an encrypted keystore for securely storing API keys locally. This is more secure than environment variables for development machines.
The keystore uses AES-256-GCM encryption with Argon2id key derivation. Set your master password:
export IRIS_KEYSTORE_KEY=your-secure-master-password

# Store keys (prompts for value)
iris keys set openai
iris keys set anthropic
iris keys set gemini
# List stored keys (shows key names, not values)
iris keys list
# openai
# anthropic
# gemini
# Remove a key
iris keys remove gemini

The encrypted keystore is stored at ~/.iris/keys.enc with 0600 permissions (owner read/write only).
package main
import ( "context" "fmt" "log"
"github.com/petal-labs/iris/core" "github.com/petal-labs/iris/providers/openai")
func main() { // Load key from keystore keystore, err := core.LoadKeystore() if err != nil { log.Fatal(err) }
apiKey, err := keystore.Get("openai") if err != nil { log.Fatal(err) }
// Use the key provider := openai.New(apiKey) client := core.NewClient(provider)
resp, _ := client.Chat("gpt-4o"). User("Hello!"). GetResponse(context.Background())
	fmt.Println(resp.Output)
}

For consistent settings across sessions, create a configuration file at ~/.iris/config.yaml:
# Default provider and model
default_provider: openai
default_model: gpt-4o
# Provider-specific settings
providers:
  openai:
    api_key_env: OPENAI_API_KEY  # Or use keystore
    default_model: gpt-4o
    organization: org-xxx  # Optional
  anthropic:
    api_key_env: ANTHROPIC_API_KEY
    default_model: claude-3-5-sonnet-20241022
  ollama:
    base_url: http://localhost:11434
    default_model: llama3.2
# Telemetry settings
telemetry:
  enabled: true
  log_level: info
# Retry policy
retry:
  max_attempts: 3
  initial_backoff: 1s
  max_backoff: 30s
  backoff_multiplier: 2.0

package main
import ( "log"
"github.com/petal-labs/iris/core")
func main() { // Load config from default location (~/.iris/config.yaml) config, err := core.LoadConfig() if err != nil { log.Fatal(err) }
// Create provider from config provider, err := config.GetProvider("openai") if err != nil { log.Fatal(err) }
	client := core.NewClient(provider)
	// Use client...
}

The CLI provides commands for quick testing and experimentation:
# Simple chat
iris chat "Explain quantum entanglement"
# With specific provider/model
iris chat --provider anthropic --model claude-3-opus "Write a poem"
# Streaming output
iris chat --stream "Tell me a story"
# With system prompt
iris chat --system "You are a pirate" "What's the weather like?"
# With temperature
iris chat --temperature 0.9 "Be creative: invent a new word"
# Multi-turn conversation (interactive mode)
iris chat --interactive

# Set a key
iris keys set openai
# List keys
iris keys list
# Remove a key
iris keys remove openai
# Test a key
iris keys test openai

# List available models for a provider
iris models list openai
# Get model details
iris models info gpt-4o

Iris provides typed errors for different failure modes:
package main
import ( "context" "errors" "fmt" "os"
"github.com/petal-labs/iris/core" "github.com/petal-labs/iris/providers/openai")
func main() { provider := openai.New(os.Getenv("OPENAI_API_KEY")) client := core.NewClient(provider)
resp, err := client.Chat("gpt-4o"). User("Hello"). GetResponse(context.Background())
if err != nil { // Check for specific error types var rateLimitErr *core.RateLimitError var authErr *core.AuthenticationError var contextErr *core.ContextLengthError
switch { case errors.As(err, &rateLimitErr): fmt.Printf("Rate limited. Retry after: %v\n", rateLimitErr.RetryAfter) case errors.As(err, &authErr): fmt.Println("Authentication failed. Check your API key.") case errors.As(err, &contextErr): fmt.Printf("Input too long. Max tokens: %d\n", contextErr.MaxTokens) default: fmt.Printf("Request failed: %v\n", err) } return }
	fmt.Println(resp.Output)
}

Ensure your API key is set correctly:
# Check environment variable
echo $OPENAI_API_KEY
# Or check keystore
iris keys list

Verify the model name is correct for your provider:
# List available models
iris models list openai

Your input is too long for the model. Options:
- Use a model with a larger context window (e.g., gpt-4o-128k)
- Limit the response length with the MaxTokens() builder method

The provider is throttling requests. Options:
Ensure Ollama is running:
# Start Ollama
ollama serve
# Verify it's accessible
curl http://localhost:11434/api/tags

Concepts
Understand Iris’s architecture and building blocks. Read Concepts →
Providers
Configure and use different LLM providers. Provider Setup →
Streaming Guide
Deep dive into streaming patterns. Streaming Guide →
Tools Guide
Build tool-augmented agents. Tools Guide →