Tools Guide
Learn advanced tool calling patterns. Tools →
OpenAI is the most feature-rich provider in Iris, offering chat completions, streaming, tool calling, image generation (DALL-E), embeddings, and the Responses API for advanced reasoning models.
```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/petal-labs/iris/core"
	"github.com/petal-labs/iris/providers/openai"
)

func main() {
	provider := openai.New(os.Getenv("OPENAI_API_KEY"))
	client := core.NewClient(provider)

	resp, err := client.Chat("gpt-4o").
		System("You are a helpful assistant.").
		User("Explain Go interfaces in two sentences.").
		GetResponse(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Output)
}
```

```bash
# Store in the encrypted keystore (recommended)
iris keys set openai
# Prompts for: Enter API key for openai: sk-...

export OPENAI_API_KEY=sk-...
```

```go
import "github.com/petal-labs/iris/providers/openai"

// From an API key string
provider := openai.New("sk-...")
```
```go
// From the OPENAI_API_KEY environment variable
provider, err := openai.NewFromEnv()
if err != nil {
	log.Fatal("OPENAI_API_KEY not set:", err)
}

// From the Iris keystore (falls back to environment)
provider, err := openai.NewFromKeystore()
```

| Option | Description | Default |
|---|---|---|
| `WithBaseURL(url)` | Override the API base URL | `https://api.openai.com/v1` |
| `WithHTTPClient(client)` | Use a custom `*http.Client` | Default client |
| `WithOrgID(org)` | Set the OpenAI-Organization header | None |
| `WithProjectID(project)` | Set the OpenAI-Project header | None |
| `WithHeader(key, value)` | Add a custom HTTP header | None |
| `WithTimeout(duration)` | Set the request timeout | 30 seconds |
```go
provider := openai.New("sk-...",
	openai.WithOrgID("org-abc123"),
	openai.WithProjectID("proj-xyz789"),
	openai.WithTimeout(60*time.Second),
	openai.WithHeader("X-Custom-Header", "value"),
)
```

| Feature | Supported | Notes |
|---|---|---|
| Chat | ✓ | All GPT models |
| Streaming | ✓ | Real-time token streaming |
| Tool calling | ✓ | Parallel and sequential |
| Vision | ✓ | Image analysis with GPT-4o |
| Reasoning | ✓ | o1, o3 models with thinking |
| Image generation | ✓ | DALL-E 2 and 3 |
| Embeddings | ✓ | text-embedding-3-small/large |
| Responses API | ✓ | GPT-5+ with web search |
| Model | Context | Best For |
|---|---|---|
| gpt-4o | 128K | General purpose, multimodal |
| gpt-4o-mini | 128K | Cost-effective, fast |
| gpt-4-turbo | 128K | Complex reasoning |
| gpt-4 | 8K | Legacy, stable |
| gpt-3.5-turbo | 16K | Fast, economical |
| Model | Context | Best For |
|---|---|---|
| o1 | 200K | Complex multi-step reasoning |
| o1-mini | 128K | Fast reasoning, code |
| o1-preview | 128K | Preview reasoning |
| o3-mini | 200K | Latest reasoning model |
| Model | Dimensions | Best For |
|---|---|---|
| text-embedding-3-large | 3072 | Highest quality |
| text-embedding-3-small | 1536 | Cost-effective |
| text-embedding-ada-002 | 1536 | Legacy |
| Model | Sizes | Best For |
|---|---|---|
| dall-e-3 | 1024x1024, 1792x1024, 1024x1792 | High quality, prompt enhancement |
| dall-e-2 | 256x256, 512x512, 1024x1024 | Faster, multiple variations |
```go
resp, err := client.Chat("gpt-4o").
	System("You are a helpful coding assistant.").
	User("Write a function to reverse a string in Go.").
	Temperature(0.3).
	MaxTokens(500).
	GetResponse(ctx)
if err != nil {
	log.Fatal(err)
}
fmt.Println(resp.Output)
fmt.Printf("Tokens: %d input, %d output\n", resp.Usage.InputTokens, resp.Usage.OutputTokens)
```

Stream responses for real-time output:
```go
stream, err := client.Chat("gpt-4o").
	System("You are a helpful assistant.").
	User("Explain the Go garbage collector.").
	GetStream(ctx)
if err != nil {
	log.Fatal(err)
}

for chunk := range stream.Ch {
	fmt.Print(chunk.Content)
}
fmt.Println()

// Check for streaming errors
if err := <-stream.Err; err != nil {
	log.Fatal(err)
}

// Get final response with usage stats
final := <-stream.Final
fmt.Printf("Total tokens: %d\n", final.Usage.TotalTokens)
```

```go
func chatHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Cache-Control", "no-cache")
	w.Header().Set("Connection", "keep-alive")

	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "Streaming not supported", http.StatusInternalServerError)
		return
	}

	stream, err := client.Chat("gpt-4o").
		User(r.URL.Query().Get("prompt")).
		GetStream(r.Context())
	if err != nil {
		fmt.Fprintf(w, "event: error\ndata: %s\n\n", err.Error())
		return
	}

	for chunk := range stream.Ch {
		data, _ := json.Marshal(map[string]string{"content": chunk.Content})
		fmt.Fprintf(w, "event: message\ndata: %s\n\n", data)
		flusher.Flush()
	}

	fmt.Fprintf(w, "event: done\ndata: {}\n\n")
	flusher.Flush()
}
```

Analyze images with GPT-4o:
// Image from URLresp, err := client.Chat("gpt-4o"). System("You are a helpful image analyst."). UserMultimodal(). Text("What's in this image? Describe it in detail."). ImageURL("https://example.com/photo.jpg"). Done(). GetResponse(ctx)imageData, err := os.ReadFile("photo.png")if err != nil { log.Fatal(err)}base64Data := base64.StdEncoding.EncodeToString(imageData)
resp, err := client.Chat("gpt-4o"). UserMultimodal(). Text("Analyze this image."). ImageBase64(base64Data, "image/png"). Done(). GetResponse(ctx)resp, err := client.Chat("gpt-4o"). UserMultimodal(). Text("Compare these two images. What are the differences?"). ImageURL("https://example.com/before.jpg"). ImageURL("https://example.com/after.jpg"). Done(). GetResponse(ctx)Control token usage with detail level:
// High detail for text extraction, diagramsresp, err := client.Chat("gpt-4o"). UserMultimodal(). Text("Read all the text in this document."). ImageURL(docURL, core.ImageDetailHigh). Done(). GetResponse(ctx)
// Low detail for simple classification (fewer tokens)resp, err := client.Chat("gpt-4o"). UserMultimodal(). Text("Is this a cat or a dog?"). ImageURL(petURL, core.ImageDetailLow). Done(). GetResponse(ctx)Define and use tools for function calling:
// Define a toolweatherTool := core.Tool{ Name: "get_weather", Description: "Get current weather for a location", Parameters: map[string]interface{}{ "type": "object", "properties": map[string]interface{}{ "location": map[string]interface{}{ "type": "string", "description": "City name, e.g., 'San Francisco, CA'", }, "unit": map[string]interface{}{ "type": "string", "enum": []string{"celsius", "fahrenheit"}, }, }, "required": []string{"location"}, },}
// First request - model decides to call toolresp, err := client.Chat("gpt-4o"). System("You are a helpful assistant with access to weather data."). User("What's the weather in Tokyo?"). Tools(weatherTool). GetResponse(ctx)
if len(resp.ToolCalls) > 0 { // Execute the tool call := resp.ToolCalls[0] var args struct { Location string `json:"location"` Unit string `json:"unit"` } json.Unmarshal([]byte(call.Arguments), &args)
// Get weather data (your implementation) weatherData := getWeather(args.Location, args.Unit)
// Continue conversation with tool result finalResp, err := client.Chat("gpt-4o"). System("You are a helpful assistant with access to weather data."). User("What's the weather in Tokyo?"). Tools(weatherTool). Assistant(resp.Output). ToolCall(call.ID, call.Name, call.Arguments). ToolResult(call.ID, weatherData). GetResponse(ctx)
fmt.Println(finalResp.Output)}GPT-4o can call multiple tools in parallel:
resp, err := client.Chat("gpt-4o"). User("What's the weather in Tokyo for today and the stock price of AAPL?"). Tools(weatherTool, stockTool). ToolChoice(core.ToolChoiceAuto). GetResponse(ctx)
// Handle multiple tool callsfor _, call := range resp.ToolCalls { switch call.Name { case "get_weather": // Handle weather case "get_stock_price": // Handle stock }}Use reasoning models for complex multi-step problems:
resp, err := client.Chat("o1"). User("Solve this step by step: If a train travels at 60 mph..."). GetResponse(ctx)
// Access reasoning/thinking contentif resp.Reasoning != "" { fmt.Println("Thinking:", resp.Reasoning)}fmt.Println("Answer:", resp.Output)The Responses API provides enhanced capabilities for advanced models:
// Using the Responses API with web searchresp, err := client.Chat("gpt-5"). User("What are the latest developments in Go 1.23?"). WebSearch(true). // Enable real-time web search GetResponse(ctx)
// Access citations from web searchfor _, citation := range resp.Citations { fmt.Printf("Source: %s - %s\n", citation.Title, citation.URL)}The Responses API supports built-in tools:
resp, err := client.Chat("gpt-5"). User("Find the current stock price of Tesla and create a chart."). WebSearch(true). CodeInterpreter(true). GetResponse(ctx)Generate images from text prompts:
resp, err := provider.GenerateImage(ctx, &core.ImageGenerateRequest{ Model: "dall-e-3", Prompt: "A serene Japanese garden with cherry blossoms and a koi pond", Size: core.ImageSize1024x1024, Quality: core.ImageQualityHD, Style: core.ImageStyleVivid, N: 1,})
if err != nil { log.Fatal(err)}
fmt.Printf("Image URL: %s\n", resp.Images[0].URL)fmt.Printf("Revised prompt: %s\n", resp.Images[0].RevisedPrompt)resp, err := provider.GenerateImage(ctx, &core.ImageGenerateRequest{ Model: "dall-e-3", Prompt: "A futuristic cityscape at sunset", Size: core.ImageSize1792x1024, ResponseFormat: core.ImageResponseFormatB64JSON,})
// Decode and saveimageData, _ := base64.StdEncoding.DecodeString(resp.Images[0].B64JSON)os.WriteFile("generated.png", imageData, 0644)Edit existing images with DALL-E 2:
imageData, _ := os.ReadFile("original.png")maskData, _ := os.ReadFile("mask.png") // White areas will be edited
resp, err := provider.EditImage(ctx, &core.ImageEditRequest{ Model: "dall-e-2", Prompt: "Add a red balloon floating in the sky", Image: imageData, Mask: maskData, Size: core.ImageSize1024x1024,})Generate embeddings for semantic search and RAG:
resp, err := provider.Embeddings(ctx, &core.EmbeddingRequest{ Model: "text-embedding-3-large", Input: []core.EmbeddingInput{ {Text: "Iris is a Go SDK for LLM providers."}, {Text: "OpenAI provides GPT models for text generation."}, },})
if err != nil { log.Fatal(err)}
for i, emb := range resp.Embeddings { fmt.Printf("Embedding %d: %d dimensions\n", i, len(emb.Values))}Reduce embedding dimensions for storage efficiency:
resp, err := provider.Embeddings(ctx, &core.EmbeddingRequest{ Model: "text-embedding-3-large", Input: inputs, Dimensions: 256, // Reduce from 3072 to 256})Force JSON output:
type Analysis struct { Sentiment string `json:"sentiment"` Topics []string `json:"topics"` Confidence float64 `json:"confidence"`}
resp, err := client.Chat("gpt-4o"). System("Analyze the sentiment and topics of the text. Respond in JSON."). User("I love how Go handles concurrency! The goroutines are amazing."). ResponseFormat(core.ResponseFormatJSON). GetResponse(ctx)
var analysis Analysisjson.Unmarshal([]byte(resp.Output), &analysis)fmt.Printf("Sentiment: %s, Confidence: %.2f\n", analysis.Sentiment, analysis.Confidence)Enforce a specific JSON schema:
resp, err := client.Chat("gpt-4o"). System("Extract person information from the text."). User("John Smith is 35 years old and works as a software engineer in San Francisco."). ResponseFormatSchema(core.JSONSchema{ Name: "person", Schema: map[string]interface{}{ "type": "object", "properties": map[string]interface{}{ "name": map[string]string{"type": "string"}, "age": map[string]string{"type": "integer"}, "occupation": map[string]string{"type": "string"}, "city": map[string]string{"type": "string"}, }, "required": []string{"name", "age"}, }, }). GetResponse(ctx)Build conversation history:
builder := client.Chat("gpt-4o"). System("You are a helpful Go programming tutor.")
// First turnresp1, _ := builder.User("What are goroutines?").GetResponse(ctx)fmt.Println("Assistant:", resp1.Output)
// Second turn with historyresp2, _ := builder. Assistant(resp1.Output). User("How do I wait for multiple goroutines to complete?"). GetResponse(ctx)fmt.Println("Assistant:", resp2.Output)
// Third turnresp3, _ := builder. Assistant(resp1.Output). User("How do I wait for multiple goroutines to complete?"). Assistant(resp2.Output). User("Show me an example with error handling."). GetResponse(ctx)fmt.Println("Assistant:", resp3.Output)resp, err := client.Chat("gpt-4o").User(prompt).GetResponse(ctx)if err != nil { var apiErr *core.APIError if errors.As(err, &apiErr) { switch apiErr.StatusCode { case 400: log.Printf("Bad request: %s", apiErr.Message) case 401: log.Fatal("Invalid API key") case 429: log.Printf("Rate limited. Retry after: %s", apiErr.RetryAfter) // Implement backoff case 500, 502, 503: log.Printf("OpenAI service error: %s", apiErr.Message) // Use fallback provider default: log.Printf("API error %d: %s", apiErr.StatusCode, apiErr.Message) } return }
// Context errors if errors.Is(err, context.DeadlineExceeded) { log.Println("Request timed out") } else if errors.Is(err, context.Canceled) { log.Println("Request canceled") } else { log.Printf("Unexpected error: %v", err) }}Use Azure-hosted OpenAI models:
```go
provider := openai.New("your-azure-key",
	openai.WithBaseURL("https://your-resource.openai.azure.com/openai/deployments/gpt-4o"),
	openai.WithHeader("api-key", "your-azure-key"),
	openai.WithAzure(true),
)

// Or use environment variables
// AZURE_OPENAI_ENDPOINT=https://your-resource.openai.azure.com
// AZURE_OPENAI_API_KEY=your-azure-key
provider, err := openai.NewAzureFromEnv("gpt-4o")
```

| Task | Recommended Model |
|---|---|
| General chat, Q&A | gpt-4o-mini |
| Complex reasoning | gpt-4o or o1 |
| Code generation | gpt-4o |
| Fast responses | gpt-4o-mini |
| Vision/images | gpt-4o |
| Embeddings | text-embedding-3-small |
// Set max_tokens to limit response lengthresp, err := client.Chat("gpt-4o"). User(prompt). MaxTokens(500). GetResponse(ctx)
// Use low detail for simple image tasksbuilder.ImageURL(url, core.ImageDetailLow)
// Use smaller embedding models when possibleresp, err := provider.Embeddings(ctx, &core.EmbeddingRequest{ Model: "text-embedding-3-small", // vs text-embedding-3-large Input: inputs,})client := core.NewClient(provider, core.WithRetryPolicy(&core.RetryPolicy{ MaxRetries: 3, InitialInterval: 1 * time.Second, MaxInterval: 30 * time.Second, BackoffMultiplier: 2.0, RetryOn: []int{429, 500, 502, 503}, }),)// Never log or expose API keysprovider := openai.New(os.Getenv("OPENAI_API_KEY"))
```go
// Keys are stored as core.Secret internally
// fmt.Println(provider.apiKey) // Prints "[REDACTED]"
```

Keys are wrapped in `core.Secret` to prevent accidental logging.

Tools Guide
Learn advanced tool calling patterns. Tools →
Streaming Guide
Master streaming responses. Streaming →
Images Guide
Work with vision and image generation. Images →
Providers Overview
Compare all available providers. Providers →