LangGraph is a powerful framework for building LLM applications. LangWatch integrates with LangGraph to provide detailed observability into your chains, agents, LLM calls, and tool usage.

The community instrumentors below target LangChain, but they capture LangGraph operations as well: compiled LangGraph graphs execute through LangChain's callback system, which is exactly what these instrumentors hook into. A minimal LangGraph sketch follows the first example below.

2. Using Community OpenTelemetry Instrumentors

Dedicated LangChain instrumentors from libraries like OpenInference and OpenLLMetry can also be used to capture LangGraph operations as OpenTelemetry traces, which LangWatch can then ingest.

Instrumenting LangGraph with Dedicated Instrumentors

i. Via langwatch.setup()

# OpenInference Example
import langwatch
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from openinference.instrumentation.langchain import LangchainInstrumentor
import os
import asyncio

langwatch.setup(
    instrumentors=[LangchainInstrumentor()] # Add OpenInference LangchainInstrumentor
)

model = ChatOpenAI()
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are brief."),
    ("human", "{question}"),
])
runnable = prompt | model | StrOutputParser()

@langwatch.trace(name="Langchain - OpenInference via Setup")
async def handle_message_oinference_setup(user_question: str):
    response = await runnable.ainvoke({"question": user_question})
    return response

async def main_community_oinference_setup():
    if not os.getenv("OPENAI_API_KEY"):
        print("OPENAI_API_KEY not set. Skipping OpenInference Langchain (setup) example.")
        return
    response = await handle_message_oinference_setup("Explain Langchain instrumentation with OpenInference.")
    print(f"AI (OInference Setup): {response}")

if __name__ == "__main__":
    asyncio.run(main_community_oinference_setup())
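
Both examples in this section drive a plain LangChain runnable, but the same instrumentor also captures a compiled LangGraph graph, since graph nodes that call LangChain components emit the callbacks the instrumentor listens for. The sketch below is illustrative only: the state shape, node name, and trace name are our own choices, not part of any LangWatch or LangGraph API.

# LangGraph Example (illustrative; state shape and node names are hypothetical)
import langwatch
from typing import TypedDict
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, START, END
from openinference.instrumentation.langchain import LangchainInstrumentor

langwatch.setup(
    instrumentors=[LangchainInstrumentor()]
)

model = ChatOpenAI()

class State(TypedDict):
    question: str
    answer: str

def answer_node(state: State) -> dict:
    # The model call inside this node goes through LangChain's callback
    # system, so the instrumentor records it as a span on the trace.
    response = model.invoke(state["question"])
    return {"answer": response.content}

builder = StateGraph(State)
builder.add_node("answer", answer_node)
builder.add_edge(START, "answer")
builder.add_edge("answer", END)
graph = builder.compile()

@langwatch.trace(name="LangGraph - OpenInference via Setup")
def handle_graph_question(question: str):
    result = graph.invoke({"question": question})
    return result["answer"]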

ii. Direct Instrumentation

# OpenInference Example
import langwatch
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from openinference.instrumentation.langchain import LangchainInstrumentor
import os
import asyncio

langwatch.setup()
LangchainInstrumentor().instrument() # Instrument LangChain directly

model = ChatOpenAI()
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are very brief."),
    ("human", "{question}"),
])
runnable = prompt | model | StrOutputParser()

@langwatch.trace(name="Langchain - OpenInference Direct")
async def handle_message_oinference_direct(user_question: str):
    response = await runnable.ainvoke({"question": user_question})
    return response

async def main_community_oinference_direct():
    if not os.getenv("OPENAI_API_KEY"):
        print("OPENAI_API_KEY not set. Skipping OpenInference Langchain (direct) example.")
        return
    response = await handle_message_oinference_direct("How does direct Langchain instrumentation work?")
    print(f"AI (OInference Direct): {response}")

if __name__ == "__main__":
    asyncio.run(main_community_oinference_direct())
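
The examples above use OpenInference, but OpenLLMetry works the same way. Assuming its opentelemetry-instrumentation-langchain package is installed, the only change is the import; everything else in either example carries over unchanged:

# OpenLLMetry Example (sketch; assumes opentelemetry-instrumentation-langchain is installed)
import langwatch
from opentelemetry.instrumentation.langchain import LangchainInstrumentor

# Option 1: pass the instrumentor to setup(), as in example i.
langwatch.setup(instrumentors=[LangchainInstrumentor()])

# Option 2: after a plain langwatch.setup(), instrument directly, as in example ii.
# LangchainInstrumentor().instrument()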