Track your first agent
in 60 seconds.
One wrapper. Four lines. Full visibility into token counts, model costs, latency, retries, and dollar spend — across every agent, workflow, and service you run.
Add Spendara to your project. The SDK wraps your existing AI client calls with zero changes to your logic.
# Python
pip install spendara
# Node.js
npm install spendara
# Python with the LangChain integration
pip install spendara langchain
Set SPENDARA_API_KEY as an environment variable. Never hard-code it.
# Shell (session-scoped environment variable)
export SPENDARA_API_KEY="spr_live_xxxxxxxxxxxxxxxxxxxxxxxx"
# .env file entry — same key, dotenv syntax (no `export`)
SPENDARA_API_KEY=spr_live_xxxxxxxxxxxxxxxxxxxxxxxx
Add Spendara to your existing code. Pick your runtime below — the wrapper captures model, tokens, latency, retries, and dollar cost without touching your agent logic.
Use the @spendara.track() decorator or the spendara.run() context manager. Both capture the same data.
import spendara
from openai import OpenAI

# One-time SDK setup at process start; reads SPENDARA_API_KEY from the environment.
spendara.init()

client = OpenAI()


# The decorator tags every model call made inside this function with the
# given agent/workflow identifiers and reports cost events to Spendara.
@spendara.track(
    agent_id="research-agent",
    workflow_id="market-analysis-v2",
)
def run_research(query: str) -> str:
    completion = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": query}],
    )
    return completion.choices[0].message.content


# That's it — every call now streams cost events to Spendara.
result = run_research("Summarize Q1 earnings for NVDA")
import spendara
import anthropic

spendara.init()
client = anthropic.Anthropic()


def run_agent(task: str) -> str:
    # The context manager draws explicit run boundaries: everything
    # executed inside the `with` block is attributed to this run.
    with spendara.run(
        agent_id="summarizer-agent",
        workflow_id="daily-digest",
        tags={"env": "production", "team": "content"},
    ) as run:
        message = client.messages.create(
            model="claude-opus-4-5",
            max_tokens=1024,
            messages=[{"role": "user", "content": task}],
        )
        # run.cost, run.tokens, run.latency_ms are available at this point.
        return message.content[0].text
Wrap any OpenAI or Anthropic SDK call. Works with async/await and streams.
import { Spendara } from 'spendara';
import OpenAI from 'openai';

// Initialize once; the constructor reads SPENDARA_API_KEY from the environment.
const spendara = new Spendara();
const openai = new OpenAI();

async function runAgent(query: string): Promise<string> {
  return spendara.track(
    { agentId: 'research-agent', workflowId: 'market-analysis-v2' },
    async (ctx) => {
      // ctx.cost, ctx.tokens, ctx.latencyMs are available inside the callback.
      const completion = await openai.chat.completions.create({
        model: 'gpt-4o',
        messages: [{ role: 'user', content: query }],
      });
      return completion.choices[0].message.content ?? '';
    },
  );
}
import { Spendara } from 'spendara';
import Anthropic from '@anthropic-ai/sdk';

const spendara = new Spendara();
const anthropic = new Anthropic();

async function summarize(text: string): Promise<string> {
  return spendara.track(
    {
      agentId: 'summarizer-agent',
      workflowId: 'daily-digest',
      tags: { env: 'production', team: 'content' },
    },
    async () => {
      const reply = await anthropic.messages.create({
        model: 'claude-opus-4-5',
        max_tokens: 1024,
        messages: [{ role: 'user', content: text }],
      });
      // Assumes the first content block is text — holds for plain text
      // completions; tool-use responses would need a type check.
      return (reply.content[0] as Anthropic.TextBlock).text;
    },
  );
}
SpendaraCallbackHandler hooks into LangChain's callback system. Every LLM call, tool call, and retrieval is captured automatically — no decorator needed.
import spendara
from spendara.integrations.langchain import SpendaraCallbackHandler
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage

spendara.init()

# A single callback handler automatically captures every LLM call,
# tool call, retrieval, and chain step in the workflow.
handler = SpendaraCallbackHandler(
    agent_id="research-agent",
    workflow_id="market-analysis-v2",
    tags={"env": "production"},
)

llm = ChatOpenAI(
    model="gpt-4o",
    callbacks=[handler],  # attaching the handler is the only change needed
)

response = llm.invoke([HumanMessage(content="What drove NVDA revenue in Q1?")])
print(response.content)
from spendara.integrations.langchain import SpendaraCallbackHandler
from langgraph.graph import StateGraph, END
from langchain_openai import ChatOpenAI
# Fix: HumanMessage is used in the invoke() call below but was not imported.
from langchain_core.messages import HumanMessage

handler = SpendaraCallbackHandler(
    agent_id="graph-agent",
    workflow_id="multi-step-research",
)

# Attach the handler to the LLM — every node that calls this LLM is
# tracked automatically.
llm = ChatOpenAI(model="gpt-4o", callbacks=[handler])


def research_node(state):
    # Single graph node: ask the model and append its reply to the transcript.
    response = llm.invoke(state["messages"])
    return {"messages": state["messages"] + [response]}


graph = StateGraph(dict)
graph.add_node("research", research_node)
graph.set_entry_point("research")
graph.add_edge("research", END)
app = graph.compile()

# Each graph invocation = one tracked workflow run in Spendara
result = app.invoke({"messages": [HumanMessage("Analyze market trends")]})
After the first tracked call, your data appears in the Spendara dashboard within seconds. No additional config — agent spend, workflow breakdowns, and anomaly detection are all live.
# Verify events are flowing
spendara verify
# Expected output on success:
# → ✓ API key valid
# → ✓ 3 events received (last: 0.4s ago)
# → ✓ Dashboard: https://spendara.polsia.app/dashboard
Open the live demo dashboard → to see what your data will look like. The demo runs against realistic simulated traffic across 10 agents.
What gets tracked automatically
Spendara captures these fields on every tracked call — no extra instrumentation needed.