// Create a trace
tracer := langwatch.Tracer("chat-app")
ctx, span := tracer.Start(ctx, "ChatCompletion")
defer span.End()

span.SetType(langwatch.SpanTypeLLM)
span.SetThreadID("conversation-123")
span.RecordInputString("What is the weather like today?")

// Make OpenAI API call
response, err := client.Chat.Completions.New(ctx, openai.ChatCompletionNewParams{
    Model: openai.ChatModelGPT4oMini,
    Messages: []openai.ChatCompletionMessageParamUnion{
        openai.UserMessage("What is the weather like today?"),
    },
})
if err != nil {
    // Record the failure on the span so it is visible in LangWatch,
    // then bail out instead of dereferencing a nil response.
    span.RecordError(err)
    return err
}

// Record the response
span.RecordOutputString(response.Choices[0].Message.Content)
span.SetResponseModel(response.Model)

// Response contains:
// - Content: "I don't have access to real-time weather data..."
// - Model: "gpt-4o-mini-2024-07-18"
// - Usage: {InputTokens: 8, OutputTokens: 15}
This reference provides detailed documentation for all public APIs in the LangWatch Go SDK and its associated instrumentation packages.

Installation

go get github.com/langwatch/langwatch/sdk-go
go get github.com/langwatch/langwatch/sdk-go/instrumentation/openai
For a quick start guide with step-by-step instructions, see the Go Integration Guide. For practical examples of creating traces and spans, see the Core Concepts section in the guide.

Core SDK (langwatch)

This package contains the primary functions for setting up LangWatch and creating traces and spans.

Setup

Setup() initializes the LangWatch OpenTelemetry exporter and sets it as the global tracer provider. It should be called once when your application starts.
func Setup(ctx context.Context) (shutdown func(context.Context))
Parameters:
  • ctx - Context for the setup operation
Returns:
  • shutdown - Function that should be deferred to ensure traces are flushed on exit
Example:
shutdown := langwatch.Setup(context.Background())
defer shutdown(context.Background())
Always call the shutdown function to ensure traces are properly flushed when your application exits.
For a complete setup example with environment variables and error handling, see the Setup section in the integration guide.

Tracer

Tracer() retrieves a LangWatchTracer instance, which is a thin wrapper around an OpenTelemetry Tracer.
func Tracer(instrumentationName string, opts ...trace.TracerOption) LangWatchTracer
Parameters:
  • instrumentationName (string) - Name of the library or application being instrumented.
  • opts (...trace.TracerOption) - Optional OpenTelemetry tracer options (e.g., trace.WithInstrumentationVersion).
Example:
// Basic usage
tracer := langwatch.Tracer("my-app")

// With instrumentation version
tracer := langwatch.Tracer("my-app", 
    trace.WithInstrumentationVersion("1.0.0"))

LangWatchTracer

The LangWatchTracer interface provides a Start method that mirrors OpenTelemetry’s but returns a LangWatchSpan.
type LangWatchTracer interface {
	Start(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, LangWatchSpan)
}
Example:
tracer := langwatch.Tracer("my-app")
ctx, span := tracer.Start(ctx, "HandleUserRequest")
defer span.End()

// The span is now active and can be used to record data
span.SetType(langwatch.SpanTypeLLM)
span.RecordInputString("User query")

LangWatchSpan

The LangWatchSpan interface embeds the standard trace.Span and adds several helper methods for LangWatch-specific data.
SetType(spanType SpanType)
function
Sets the span type for categorization in LangWatch. This enables specialized UI treatment and analytics. Example:
span.SetType(langwatch.SpanTypeLLM)        // For LLM calls
span.SetType(langwatch.SpanTypeRAG)        // For RAG operations
span.SetType(langwatch.SpanTypeRetrieval)  // For document retrieval
span.SetType(langwatch.SpanTypeTool)       // For tool/function calls
Using span types is optional but highly recommended as it enables LangWatch to provide more tailored insights and visualizations.
SetThreadID(threadID string)
function
Assigns a thread ID to group this trace with a conversation. Useful for multi-turn conversations. Example:
span.SetThreadID("conversation-123")
span.SetThreadID("user-session-abc-def")
All spans within the same trace will share the same thread ID, allowing you to group related interactions together.
SetUserID(userID string)
function
Assigns a user ID to the trace for user-centric analytics and filtering. Example:
span.SetUserID("user-abc-123")
span.SetUserID("customer-xyz-789")
RecordInputString(input string)
function
Records a simple string as the span’s input. Ideal for user queries or simple text inputs. Example:
span.RecordInputString("What is the weather like today?")
span.RecordInputString("User query text")
RecordInput(input any)
function
Records a structured object (e.g., struct, map) as the span’s input, serialized to JSON. Use for complex request objects. Example:
type ChatRequest struct {
    Messages []Message `json:"messages"`
    Model    string   `json:"model"`
    Temperature float64 `json:"temperature"`
}

request := ChatRequest{
    Messages: []Message{{Role: "user", Content: "Hello"}},
    Model: "gpt-4o-mini",
    Temperature: 0.7,
}
span.RecordInput(request)
RecordOutputString(output string)
function
Records a simple string as the span’s output. Ideal for AI responses or simple text outputs. Example:
span.RecordOutputString("The capital of France is Paris.")
span.RecordOutputString("AI response text")
RecordOutput(output any)
function
Records a structured object as the span’s output, serialized to JSON. Use for complex response objects. Example:
type ChatResponse struct {
    Content string `json:"content"`
    Tokens  int    `json:"tokens"`
    Model   string `json:"model"`
}

response := ChatResponse{
    Content: "The capital of France is Paris.",
    Tokens: 8,
    Model: "gpt-4o-mini",
}
span.RecordOutput(response)
SetRequestModel(model string)
function
Sets the model identifier used for a request (e.g., an LLM call). This is the model you requested to use. Example:
span.SetRequestModel("gpt-4o-mini")
span.SetRequestModel("claude-3-sonnet")
span.SetRequestModel("llama-3.1-8b")
SetResponseModel(model string)
function
Sets the model identifier reported in a response. This is the actual model that processed your request. Example:
span.SetResponseModel("gpt-4o-mini-2024-07-18")
span.SetResponseModel("claude-3-sonnet-20240229")
The response model may differ from the request model, especially with OpenAI’s model updates.
SetRAGContextChunks(chunks []SpanRAGContextChunk)
function
Attaches a slice of retrieved context chunks for RAG analysis. This enables LangWatch to analyze the relevance and quality of retrieved documents. Example:
chunks := []langwatch.SpanRAGContextChunk{
    {
        Content: "Paris is the capital of France...",
        Source: "wikipedia-paris",
        Score: 0.95,
    },
    {
        Content: "France is a country in Europe...",
        Source: "wikipedia-france", 
        Score: 0.87,
    },
}
span.SetRAGContextChunks(chunks)

OpenAI Instrumentation

The github.com/langwatch/langwatch/sdk-go/instrumentation/openai package provides middleware for the official openai-go client.
For step-by-step instructions on setting up OpenAI instrumentation, see the OpenAI integration guide.

Middleware

Middleware() creates an openai.Middleware that automatically traces OpenAI API calls.
func Middleware(instrumentationName string, opts ...Option) openai.Middleware
Parameters:
  • instrumentationName - Name of your application or service
  • opts - Optional configuration options
Configuration Options (...Option):
WithCaptureInput()
function
Records the full input payload as a span attribute. This captures the complete request sent to the LLM. Example:
otelopenai.Middleware("my-app", otelopenai.WithCaptureInput())
Enabling input capture may include sensitive data in your traces. Ensure this aligns with your data privacy requirements.
WithCaptureOutput()
function
Records the full response payload as a span attribute. For streams, this is the final accumulated response. Example:
otelopenai.Middleware("my-app", otelopenai.WithCaptureOutput())
This is particularly useful for debugging and understanding what the LLM actually returned.
WithGenAISystem(system string)
function
Sets the gen_ai.system attribute. Useful for identifying providers like "anthropic" or "azure". Defaults to "openai". Example:
// For Anthropic Claude
otelopenai.Middleware("my-app", otelopenai.WithGenAISystem("anthropic"))

// For Azure OpenAI
otelopenai.Middleware("my-app", otelopenai.WithGenAISystem("azure"))
WithTracerProvider(provider trace.TracerProvider)
function
Specifies the trace.TracerProvider to use. Defaults to the global provider. Example:
customProvider := sdktrace.NewTracerProvider(...)
otelopenai.Middleware("my-app", otelopenai.WithTracerProvider(customProvider))

LangWatch Span Types

SpanType is a string constant used with span.SetType() to categorize spans in LangWatch for specialized UI treatment and analytics.
  • SpanTypeLLM - A call to a Large Language Model. Use case: direct LLM API calls, chat completions.
  • SpanTypeChain - A sequence of related operations or a sub-pipeline. Use case: multi-step processing, workflow orchestration.
  • SpanTypeTool - A call to an external tool or function. Use case: function calls, API integrations, database queries.
  • SpanTypeAgent - An autonomous agent’s operation or decision-making step. Use case: agent reasoning, decision points, planning.
  • SpanTypeRAG - An overarching RAG operation, often containing retrieval and LLM spans. Use case: complete RAG workflows.
  • SpanTypeRetrieval - The specific step of retrieving documents from a knowledge base. Use case: vector database queries, document search.
  • SpanTypeQuery - A generic database or API query. Use case: SQL queries, REST API calls.
  • SpanTypeEmbedding - The specific step of generating embeddings. Use case: text embedding generation.
Using these span types is optional but highly recommended, as it enables LangWatch to provide more tailored insights and visualizations for your traces.

Collected Attributes

The OpenAI instrumentation automatically adds these attributes to spans:

Request Attributes

  • gen_ai.system - AI system name (e.g., “openai”)
  • gen_ai.request.model - Model used for the request
  • gen_ai.request.temperature - Temperature parameter
  • gen_ai.request.top_p - Top-p parameter
  • gen_ai.request.top_k - Top-k parameter
  • gen_ai.request.frequency_penalty - Frequency penalty
  • gen_ai.request.presence_penalty - Presence penalty
  • gen_ai.request.max_tokens - Maximum tokens
  • langwatch.gen_ai.streaming - Boolean indicating streaming
  • gen_ai.operation.name - Operation name (e.g., “completions”)
  • langwatch.input.value - Input content (if WithCaptureInput enabled)

Response Attributes

  • gen_ai.response.id - Response ID from the API
  • gen_ai.response.model - Model that generated the response
  • gen_ai.response.finish_reasons - Completion finish reasons
  • gen_ai.usage.input_tokens - Number of input tokens used
  • gen_ai.usage.output_tokens - Number of output tokens generated
  • gen_ai.openai.response.system_fingerprint - OpenAI system fingerprint
  • langwatch.output.value - Output content (if WithCaptureOutput enabled)

HTTP Attributes

Standard HTTP client attributes are also included:
  • http.request.method - HTTP method
  • url.path - Request path
  • server.address - Server address
  • http.response.status_code - HTTP status code

Request/Response Examples

Basic Chat Completion

// Create a trace
tracer := langwatch.Tracer("chat-app")
ctx, span := tracer.Start(ctx, "ChatCompletion")
defer span.End()

span.SetType(langwatch.SpanTypeLLM)
span.SetThreadID("conversation-123")
span.RecordInputString("What is the weather like today?")

// Make OpenAI API call
response, err := client.Chat.Completions.New(ctx, openai.ChatCompletionNewParams{
    Model: openai.ChatModelGPT4oMini,
    Messages: []openai.ChatCompletionMessageParamUnion{
        openai.UserMessage("What is the weather like today?"),
    },
})
if err != nil {
    // Record the failure on the span so it is visible in LangWatch,
    // then bail out instead of dereferencing a nil response.
    span.RecordError(err)
    return err
}

// Record the response
span.RecordOutputString(response.Choices[0].Message.Content)
span.SetResponseModel(response.Model)

// Response contains:
// - Content: "I don't have access to real-time weather data..."
// - Model: "gpt-4o-mini-2024-07-18"
// - Usage: {InputTokens: 8, OutputTokens: 15}

RAG Pipeline

// Start RAG trace
tracer := langwatch.Tracer("rag-app")
ctx, span := tracer.Start(ctx, "RAGQuery")
defer span.End()

span.SetType(langwatch.SpanTypeRAG)
span.SetThreadID("user-session-456")
span.RecordInputString("How do I implement authentication in Go?")

// Document retrieval span. Use a dedicated context variable instead of
// overwriting ctx, so the later LLM span is parented to the RAG span
// rather than the (already-ended) retrieval span.
retrievalCtx, retrievalSpan := tracer.Start(ctx, "RetrieveDocuments")
retrievalSpan.SetType(langwatch.SpanTypeRetrieval)
retrievalSpan.RecordInputString("authentication Go implementation")

// ... retrieval logic using retrievalCtx ...
_ = retrievalCtx

chunks := []langwatch.SpanRAGContextChunk{
    {Content: documents[0], Source: "auth-guide", Score: 0.92},
    {Content: documents[1], Source: "go-docs", Score: 0.88},
}
retrievalSpan.SetRAGContextChunks(chunks)
retrievalSpan.End()

// LLM call with retrieved context — started from ctx so it becomes a
// sibling of the retrieval span under the RAG span.
llmCtx, llmSpan := tracer.Start(ctx, "GenerateResponse")
llmSpan.SetType(langwatch.SpanTypeLLM)

// ... LLM call with context using llmCtx ...
_ = llmCtx

llmSpan.RecordOutputString("To implement authentication in Go, you can use...")
llmSpan.End()

// Record final RAG response
span.RecordOutputString("Final RAG response with citations...")

Error Handling

All SDK methods handle errors gracefully. In case of failures:
  1. Serialization errors - Fallback to string representation
  2. Network errors - Logged but don’t interrupt application flow
  3. Invalid data - Sanitized or excluded from traces
Example error handling:
// safeRecordInput records a structured input on the span, degrading
// gracefully to a plain %v string if structured recording panics.
func safeRecordInput(span langwatch.LangWatchSpan, input any) {
    defer func() {
        if panicVal := recover(); panicVal != nil {
            // Structured serialization blew up; fall back to the
            // string representation so the trace still has the input.
            span.RecordInputString(fmt.Sprintf("%v", input))
        }
    }()
    span.RecordInput(input)
}

Environment Variables

The SDK respects these environment variables:
  • LANGWATCH_API_KEY - Your LangWatch API key (required)
  • LANGWATCH_ENDPOINT - Custom LangWatch endpoint (optional)
  • OTEL_* - Standard OpenTelemetry environment variables

Complete Example

Here’s a comprehensive example showing a complete RAG application with proper error handling and best practices:
package main

import (
    "context"
    "log"
    "os"
    "time"

    langwatch "github.com/langwatch/langwatch/sdk-go"
    otelopenai "github.com/langwatch/langwatch/sdk-go/instrumentation/openai"

    "github.com/openai/openai-go"
    "github.com/openai/openai-go/option"
    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
    sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// main wires up LangWatch tracing, builds an instrumented OpenAI client,
// and runs a single query through the RAG pipeline.
func main() {
    ctx := context.Background()

    // Bound the OpenTelemetry setup so a slow endpoint cannot hang startup.
    setupCtx, cancelSetup := context.WithTimeout(ctx, 10*time.Second)
    defer cancelSetup()

    shutdown := setupLangWatch(setupCtx)
    defer shutdown(ctx)

    // OpenAI client with the LangWatch tracing middleware attached;
    // input/output capture records full payloads on each span.
    tracing := otelopenai.Middleware("rag-app",
        otelopenai.WithCaptureInput(),
        otelopenai.WithCaptureOutput(),
    )
    client := openai.NewClient(
        option.WithAPIKey(os.Getenv("OPENAI_API_KEY")),
        option.WithMiddleware(tracing),
    )

    // Process user query
    query := "How do I implement JWT authentication in Go?"
    if err := processQuery(ctx, client, query); err != nil {
        log.Fatalf("Failed to process query: %v", err)
    }
}

// processQuery traces one full RAG interaction for query: document
// retrieval followed by answer generation, recording inputs, outputs,
// and errors on a top-level RAG span.
func processQuery(ctx context.Context, client *openai.Client, query string) error {
    ctx, span := langwatch.Tracer("rag-app").Start(ctx, "ProcessUserQuery")
    defer span.End()

    span.SetType(langwatch.SpanTypeRAG)
    // Day-granular thread ID groups a day's interactions together.
    span.SetThreadID("user-session-" + time.Now().Format("20060102"))
    span.SetUserID("user-123")
    span.RecordInputString(query)

    // Step 1: fetch documents relevant to the query.
    docs, err := retrieveDocuments(ctx, query)
    if err != nil {
        span.RecordError(err)
        return err
    }

    // Step 2: answer the query grounded in the retrieved documents.
    answer, err := generateResponse(ctx, client, query, docs)
    if err != nil {
        span.RecordError(err)
        return err
    }

    span.RecordOutputString(answer)
    return nil
}

// retrieveDocuments simulates fetching documents relevant to query and
// attaches the retrieved chunks to a retrieval span for RAG analysis.
// Returns the raw document texts.
func retrieveDocuments(ctx context.Context, query string) ([]string, error) {
    tracer := langwatch.Tracer("retrieval")
    // No child spans are created here, so discard the derived context
    // instead of assigning it to ctx where it would never be read.
    _, span := tracer.Start(ctx, "RetrieveDocuments")
    defer span.End()

    span.SetType(langwatch.SpanTypeRetrieval)
    span.RecordInputString(query)

    // Simulate document retrieval
    documents := []string{
        "JWT tokens are commonly used for authentication...",
        "Use the crypto/bcrypt package for password hashing...",
    }

    chunks := []langwatch.SpanRAGContextChunk{
        {Content: documents[0], Source: "auth-guide", Score: 0.92},
        {Content: documents[1], Source: "go-docs", Score: 0.88},
    }
    span.SetRAGContextChunks(chunks)

    return documents, nil
}

// generateResponse asks the LLM to answer query using the retrieved
// documents as grounding context, recording the request/response models
// and the output text on an LLM span.
func generateResponse(ctx context.Context, client *openai.Client, query string, documents []string) (string, error) {
    tracer := langwatch.Tracer("llm")
    ctx, span := tracer.Start(ctx, "GenerateResponse")
    defer span.End()

    span.SetType(langwatch.SpanTypeLLM)
    span.SetRequestModel("gpt-4o-mini")

    // Prepare context for the LLM. Named ragContext (not "context") so
    // it does not shadow the standard library's context package.
    ragContext := "Context:\n" + documents[0] + "\n" + documents[1]

    response, err := client.Chat.Completions.New(ctx, openai.ChatCompletionNewParams{
        Model: openai.ChatModelGPT4oMini,
        Messages: []openai.ChatCompletionMessageParamUnion{
            openai.SystemMessage("You are a helpful assistant. Use the provided context to answer questions."),
            openai.UserMessage(ragContext + "\n\nQuestion: " + query),
        },
    })
    if err != nil {
        return "", err
    }

    content := response.Choices[0].Message.Content
    span.RecordOutputString(content)
    span.SetResponseModel(response.Model)

    return content, nil
}

// setupLangWatch wires an OTLP/HTTP trace exporter pointed at LangWatch
// into the global tracer provider. It returns a shutdown function that
// flushes pending spans; exits the process if configuration is invalid.
func setupLangWatch(ctx context.Context) func(context.Context) {
    apiKey := os.Getenv("LANGWATCH_API_KEY")
    if apiKey == "" {
        log.Fatal("LANGWATCH_API_KEY environment variable not set")
    }

    // Authenticate every export request with the LangWatch API key.
    headers := map[string]string{"Authorization": "Bearer " + apiKey}
    exporter, err := otlptracehttp.New(ctx,
        otlptracehttp.WithEndpointURL("https://app.langwatch.ai/api/otel/v1/traces"),
        otlptracehttp.WithHeaders(headers),
    )
    if err != nil {
        log.Fatalf("failed to create OTLP exporter: %v", err)
    }

    // Batched export keeps per-span overhead low in production.
    provider := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exporter))
    otel.SetTracerProvider(provider)

    return func(shutdownCtx context.Context) {
        if err := provider.Shutdown(shutdownCtx); err != nil {
            log.Printf("Error shutting down tracer provider: %v", err)
        }
    }
}

Version Compatibility

  • Go Version: 1.19 or later
  • OpenTelemetry: v1.24.0 or later
  • OpenAI Go SDK: Latest version

Support

For additional help with common setup issues and troubleshooting tips, see the Troubleshooting section in the integration guide.