agentlens/apps/web/src/lib/demo-data.ts
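// Demo workspace seed data: builds a set of example traces (simple chat,
// multi-tool agent, RAG pipeline, error handling, long-running workflow,
// code review, web search) for a user and marks the account as demo-seeded.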
import { prisma } from "@/lib/prisma";
import type { Prisma, SpanType } from "@agentlens/database";
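// Nested-create input shapes for events and decision points attached to a trace.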
type EventCreate = Prisma.EventCreateWithoutTraceInput;
type DecisionCreate = Prisma.DecisionPointCreateWithoutTraceInput;
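// Span literal used by the trace builders below; `parentSpanId` links child
// spans (tool and LLM calls) to their parent agent or chain span.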
interface DemoSpan {
  id: string;
  name: string;
  type: SpanType;
  status: "RUNNING" | "COMPLETED" | "ERROR";
  parentSpanId?: string;
  input?: Prisma.InputJsonValue;
  output?: Prisma.InputJsonValue;
  tokenCount?: number;
  costUsd?: number;
  durationMs?: number;
  startedAt: Date;
  endedAt?: Date;
  metadata?: Prisma.InputJsonValue;
  statusMessage?: string;
}
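// Date `days` days in the past, optionally shifted by `offsetMs` milliseconds
// (a negative offset moves the result further back in time).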
function daysAgo(days: number, offsetMs = 0): Date {
  const d = new Date();
  d.setDate(d.getDate() - days);
  d.setMilliseconds(d.getMilliseconds() + offsetMs);
  return d;
}
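// End timestamp for a span or trace given its start time and duration.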
function endDate(start: Date, durationMs: number): Date {
  return new Date(start.getTime() + durationMs);
}
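// Builds all demo traces for the given user, persists them with nested spans,
// events, and decision points, and flags the user as demo-seeded.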
export async function seedDemoData(userId: string) {
  const traces = [
    createSimpleChatTrace(userId),
    createMultiToolAgentTrace(userId),
    createRagPipelineTrace(userId),
    createErrorHandlingTrace(userId),
    createLongRunningWorkflowTrace(userId),
    createCodeAnalysisTrace(userId),
    createWebSearchTrace(userId),
  ];
  for (const { trace, spans, events, decisions } of traces) {
    await prisma.trace.create({
      data: {
        ...trace,
        spans: { create: spans },
        events: { create: events },
        decisionPoints: { create: decisions },
      },
    });
  }
  await prisma.user.update({
    where: { id: userId },
    data: { demoSeeded: true },
  });
}
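// Trace 1: a single chat completion call, with no tools or decision points.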
function createSimpleChatTrace(userId: string) {
  const start = daysAgo(1);
  const duration = 1240;
  const spanId = `demo-span-chat-${userId.slice(0, 8)}`;
  return {
    trace: {
      name: "Simple Chat Completion",
      userId,
      status: "COMPLETED" as const,
      isDemo: true,
      tags: ["openai", "chat"],
      metadata: { model: "gpt-4o", temperature: 0.7 },
      totalCost: 0.0032,
      totalTokens: 245,
      totalDuration: duration,
      startedAt: start,
      endedAt: endDate(start, duration),
    },
    spans: [
      {
        id: spanId,
        name: "chat.completions.create",
        type: "LLM_CALL" as const,
        status: "COMPLETED" as const,
        input: { messages: [{ role: "user", content: "Explain quantum computing in simple terms" }] },
        output: { content: "Quantum computing uses quantum bits (qubits) that can exist in multiple states simultaneously..." },
        tokenCount: 245,
        costUsd: 0.0032,
        durationMs: duration,
        startedAt: start,
        endedAt: endDate(start, duration),
        metadata: { model: "gpt-4o", provider: "openai" },
      },
    ],
    events: [] as EventCreate[],
    decisions: [] as DecisionCreate[],
  };
}
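// Trace 2: a research agent that chains web search, document reading, and
// summarization tools before a final LLM synthesis, with two decision points.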
function createMultiToolAgentTrace(userId: string) {
  const start = daysAgo(2);
  const parentId = `demo-span-agent-${userId.slice(0, 8)}`;
  const toolIds = [
    `demo-span-tool1-${userId.slice(0, 8)}`,
    `demo-span-tool2-${userId.slice(0, 8)}`,
    `demo-span-tool3-${userId.slice(0, 8)}`,
  ];
  const llmId = `demo-span-llm-${userId.slice(0, 8)}`;
  return {
    trace: {
      name: "Multi-Tool Agent Run",
      userId,
      status: "COMPLETED" as const,
      isDemo: true,
      tags: ["agent", "tools", "production"],
      metadata: { agent: "research-assistant", run_id: "demo-run-001" },
      totalCost: 0.0187,
      totalTokens: 1823,
      totalDuration: 8420,
      startedAt: start,
      endedAt: endDate(start, 8420),
    },
    spans: [
      {
        id: parentId,
        name: "research-assistant",
        type: "AGENT" as const,
        status: "COMPLETED" as const,
        durationMs: 8420,
        startedAt: start,
        endedAt: endDate(start, 8420),
        metadata: { max_iterations: 5 },
      },
      {
        id: toolIds[0],
        name: "web_search",
        type: "TOOL_CALL" as const,
        status: "COMPLETED" as const,
        parentSpanId: parentId,
        input: { query: "latest AI research papers 2026" },
        output: { results: [{ title: "Scaling Laws for Neural Language Models", url: "https://arxiv.org/..." }] },
        durationMs: 2100,
        startedAt: endDate(start, 200),
        endedAt: endDate(start, 2300),
      },
      {
        id: toolIds[1],
        name: "document_reader",
        type: "TOOL_CALL" as const,
        status: "COMPLETED" as const,
        parentSpanId: parentId,
        input: { url: "https://arxiv.org/..." },
        output: { content: "Abstract: We study empirical scaling laws for language model performance..." },
        durationMs: 1800,
        startedAt: endDate(start, 2400),
        endedAt: endDate(start, 4200),
      },
      {
        id: toolIds[2],
        name: "summarizer",
        type: "TOOL_CALL" as const,
        status: "COMPLETED" as const,
        parentSpanId: parentId,
        input: { text: "Abstract: We study empirical scaling laws..." },
        output: { summary: "The paper examines how language model performance scales with compute, data, and model size." },
        durationMs: 1500,
        startedAt: endDate(start, 4300),
        endedAt: endDate(start, 5800),
      },
      {
        id: llmId,
        name: "gpt-4o-synthesis",
        type: "LLM_CALL" as const,
        status: "COMPLETED" as const,
        parentSpanId: parentId,
        input: { messages: [{ role: "system", content: "Synthesize research findings" }] },
        output: { content: "Based on the latest research, AI scaling laws suggest..." },
        tokenCount: 1823,
        costUsd: 0.0187,
        durationMs: 2400,
        startedAt: endDate(start, 5900),
        endedAt: endDate(start, 8300),
        metadata: { model: "gpt-4o" },
      },
    ],
    events: [] as EventCreate[],
    decisions: [
      {
        type: "TOOL_SELECTION" as const,
        reasoning: "User asked about latest AI research, need web search to get current information",
        chosen: { tool: "web_search", args: { query: "latest AI research papers 2026" } },
        alternatives: [{ tool: "memory_lookup" }, { tool: "knowledge_base" }],
        parentSpanId: parentId,
        durationMs: 150,
        costUsd: 0.001,
        timestamp: endDate(start, 100),
      },
      {
        type: "ROUTING" as const,
        reasoning: "Search results contain arxiv links, routing to document reader for full content",
        chosen: { next_step: "document_reader" },
        alternatives: [{ next_step: "direct_response" }, { next_step: "ask_clarification" }],
        parentSpanId: parentId,
        durationMs: 80,
        costUsd: 0.0005,
        timestamp: endDate(start, 2350),
      },
    ],
  };
}
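// Trace 3: a retrieval-augmented generation pipeline: embed the query, run a
// vector search, then generate a grounded answer.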
function createRagPipelineTrace(userId: string) {
  const start = daysAgo(3);
  const retrievalId = `demo-span-retrieval-${userId.slice(0, 8)}`;
  const embeddingId = `demo-span-embed-${userId.slice(0, 8)}`;
  const genId = `demo-span-gen-${userId.slice(0, 8)}`;
  return {
    trace: {
      name: "RAG Pipeline",
      userId,
      status: "COMPLETED" as const,
      isDemo: true,
      tags: ["rag", "retrieval", "embeddings"],
      metadata: { pipeline: "knowledge-qa", version: "2.1" },
      totalCost: 0.0091,
      totalTokens: 892,
      totalDuration: 4350,
      startedAt: start,
      endedAt: endDate(start, 4350),
    },
    spans: [
      {
        id: embeddingId,
        name: "embed_query",
        type: "LLM_CALL" as const,
        status: "COMPLETED" as const,
        input: { text: "How does our refund policy work?" },
        output: { embedding: [0.023, -0.041, 0.089] },
        tokenCount: 12,
        costUsd: 0.00001,
        durationMs: 320,
        startedAt: start,
        endedAt: endDate(start, 320),
        metadata: { model: "text-embedding-3-small" },
      },
      {
        id: retrievalId,
        name: "vector_search",
        type: "MEMORY_OP" as const,
        status: "COMPLETED" as const,
        input: { embedding: [0.023, -0.041, 0.089], top_k: 5 },
        output: { documents: [{ id: "doc-1", score: 0.92, title: "Refund Policy v3" }] },
        durationMs: 180,
        startedAt: endDate(start, 400),
        endedAt: endDate(start, 580),
        metadata: { index: "company-docs", results_count: 5 },
      },
      {
        id: genId,
        name: "generate_answer",
        type: "LLM_CALL" as const,
        status: "COMPLETED" as const,
        input: { messages: [{ role: "system", content: "Answer using the provided context" }] },
        output: { content: "Our refund policy allows returns within 30 days of purchase..." },
        tokenCount: 880,
        costUsd: 0.009,
        durationMs: 3600,
        startedAt: endDate(start, 650),
        endedAt: endDate(start, 4250),
        metadata: { model: "gpt-4o-mini" },
      },
    ],
    events: [] as EventCreate[],
    decisions: [
      {
        type: "MEMORY_RETRIEVAL" as const,
        reasoning: "Query about refund policy matched knowledge base with high confidence",
        chosen: { source: "vector_search", confidence: 0.92 },
        alternatives: [{ source: "web_search" }, { source: "ask_human" }],
        durationMs: 50,
        timestamp: endDate(start, 350),
      },
    ],
  };
}
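// Trace 4: a failed LLM call that hits a rate limit, retries with exponential
// backoff, and gives up after three attempts.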
function createErrorHandlingTrace(userId: string) {
  const start = daysAgo(5);
  const spanId = `demo-span-err-${userId.slice(0, 8)}`;
  return {
    trace: {
      name: "Error Handling Example",
      userId,
      status: "ERROR" as const,
      isDemo: true,
      tags: ["error", "rate-limit"],
      metadata: { error_type: "RateLimitError", retries: 3 },
      totalCost: 0.0,
      totalTokens: 0,
      totalDuration: 15200,
      startedAt: start,
      endedAt: endDate(start, 15200),
    },
    spans: [
      {
        id: spanId,
        name: "chat.completions.create",
        type: "LLM_CALL" as const,
        status: "ERROR" as const,
        statusMessage: "RateLimitError: Rate limit exceeded. Retry after 30s.",
        input: { messages: [{ role: "user", content: "Analyze this dataset" }] },
        durationMs: 15200,
        startedAt: start,
        endedAt: endDate(start, 15200),
        metadata: { model: "gpt-4o", retry_count: 3 },
      },
    ],
    events: [
      {
        type: "ERROR" as const,
        name: "RateLimitError",
        spanId,
        metadata: { message: "Rate limit exceeded", status_code: 429 },
        timestamp: endDate(start, 5000),
      },
      {
        type: "RETRY" as const,
        name: "Retry attempt 1",
        spanId,
        metadata: { attempt: 1, backoff_ms: 2000 },
        timestamp: endDate(start, 7000),
      },
      {
        type: "RETRY" as const,
        name: "Retry attempt 2",
        spanId,
        metadata: { attempt: 2, backoff_ms: 4000 },
        timestamp: endDate(start, 11000),
      },
      {
        type: "ERROR" as const,
        name: "Max retries exceeded",
        spanId,
        metadata: { message: "Giving up after 3 retries", final_status: 429 },
        timestamp: endDate(start, 15200),
      },
    ],
    decisions: [
      {
        type: "RETRY" as const,
        reasoning: "Received 429 rate limit error, exponential backoff strategy selected",
        chosen: { action: "retry", strategy: "exponential_backoff", max_retries: 3 },
        alternatives: [{ action: "fail_immediately" }, { action: "switch_model" }],
        durationMs: 20,
        timestamp: endDate(start, 5100),
      },
    ],
  };
}
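// Trace 5: a seven-step batch processing pipeline nested under a single CHAIN
// span; step durations are randomized on each seed run.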
function createLongRunningWorkflowTrace(userId: string) {
  const start = daysAgo(6);
  const chainId = `demo-span-chain-${userId.slice(0, 8)}`;
  const spanPrefix = `demo-span-wf-${userId.slice(0, 8)}`;
  const stepNames = [
    "data_ingestion",
    "preprocessing",
    "feature_extraction",
    "model_inference",
    "post_processing",
    "validation",
    "output_formatting",
  ];
  // Build the step spans first so the parent chain span and trace totals can
  // be derived from the actual randomized step durations; otherwise the steps
  // could overrun a fixed parent duration.
  const stepSpans: DemoSpan[] = [];
  let elapsed = 200;
  for (let i = 0; i < stepNames.length; i++) {
    const stepDuration = 2000 + Math.floor(Math.random() * 5000);
    stepSpans.push({
      id: `${spanPrefix}-${i}`,
      name: stepNames[i],
      type: i === 3 ? "LLM_CALL" : "CUSTOM",
      status: "COMPLETED",
      parentSpanId: chainId,
      durationMs: stepDuration,
      startedAt: endDate(start, elapsed),
      endedAt: endDate(start, elapsed + stepDuration),
      metadata: { step: i + 1, total_steps: stepNames.length },
    });
    elapsed += stepDuration + 100;
  }
  const totalDuration = elapsed;
  const spans: DemoSpan[] = [
    {
      id: chainId,
      name: "data-processing-pipeline",
      type: "CHAIN",
      status: "COMPLETED",
      durationMs: totalDuration,
      startedAt: start,
      endedAt: endDate(start, totalDuration),
      metadata: { pipeline: "batch-analysis", version: "1.4" },
    },
    ...stepSpans,
  ];
  return {
    trace: {
      name: "Long-Running Workflow",
      userId,
      status: "COMPLETED" as const,
      isDemo: true,
      tags: ["pipeline", "batch", "production"],
      metadata: { pipeline: "batch-analysis", records_processed: 1250 },
      totalCost: 0.042,
      totalTokens: 4200,
      totalDuration,
      startedAt: start,
      endedAt: endDate(start, totalDuration),
    },
    spans,
    events: [] as EventCreate[],
    decisions: [
      {
        type: "PLANNING" as const,
        reasoning: "Large dataset detected, selecting batch processing strategy with parallel feature extraction",
        chosen: { strategy: "batch_parallel", batch_size: 50 },
        alternatives: [{ strategy: "sequential" }, { strategy: "streaming" }],
        parentSpanId: chainId,
        durationMs: 100,
        timestamp: endDate(start, 100),
      },
    ],
  };
}
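// Trace 6: a code review agent that reads a PR diff and then analyzes it with
// an LLM, with one tool-selection decision.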
function createCodeAnalysisTrace(userId: string) {
  const start = daysAgo(4);
  const agentId = `demo-span-codeagent-${userId.slice(0, 8)}`;
  const readId = `demo-span-read-${userId.slice(0, 8)}`;
  const analyzeId = `demo-span-analyze-${userId.slice(0, 8)}`;
  return {
    trace: {
      name: "Code Review Agent",
      userId,
      status: "COMPLETED" as const,
      isDemo: true,
      tags: ["code-review", "agent"],
      metadata: { repo: "acme/backend", pr_number: 142 },
      totalCost: 0.015,
      totalTokens: 1450,
      totalDuration: 6200,
      startedAt: start,
      endedAt: endDate(start, 6200),
    },
    spans: [
      {
        id: agentId,
        name: "code-review-agent",
        type: "AGENT" as const,
        status: "COMPLETED" as const,
        durationMs: 6200,
        startedAt: start,
        endedAt: endDate(start, 6200),
      },
      {
        id: readId,
        name: "read_diff",
        type: "TOOL_CALL" as const,
        status: "COMPLETED" as const,
        parentSpanId: agentId,
        input: { pr_number: 142 },
        output: { files_changed: 5, additions: 120, deletions: 30 },
        durationMs: 800,
        startedAt: endDate(start, 100),
        endedAt: endDate(start, 900),
      },
      {
        id: analyzeId,
        name: "analyze_code",
        type: "LLM_CALL" as const,
        status: "COMPLETED" as const,
        parentSpanId: agentId,
        input: { diff: "...", instructions: "Review for bugs and style issues" },
        output: { review: "Found 2 potential issues: 1) Missing null check on line 45, 2) Unused import" },
        tokenCount: 1450,
        costUsd: 0.015,
        durationMs: 5100,
        startedAt: endDate(start, 1000),
        endedAt: endDate(start, 6100),
        metadata: { model: "gpt-4o" },
      },
    ],
    events: [] as EventCreate[],
    decisions: [
      {
        type: "TOOL_SELECTION" as const,
        reasoning: "Need to read PR diff before analyzing code",
        chosen: { tool: "read_diff", args: { pr_number: 142 } },
        alternatives: [{ tool: "read_file" }, { tool: "list_files" }],
        parentSpanId: agentId,
        durationMs: 60,
        timestamp: endDate(start, 50),
      },
    ],
  };
}
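// Trace 7: a single web_search tool call, timestamped about an hour ago.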
function createWebSearchTrace(userId: string) {
  const start = daysAgo(0, -3600000);
  const searchId = `demo-span-websearch-${userId.slice(0, 8)}`;
  return {
    trace: {
      name: "Web Search Agent",
      userId,
      status: "COMPLETED" as const,
      isDemo: true,
      tags: ["search", "web"],
      metadata: { query: "AgentLens observability" },
      totalCost: 0.002,
      totalTokens: 180,
      totalDuration: 2800,
      startedAt: start,
      endedAt: endDate(start, 2800),
    },
    spans: [
      {
        id: searchId,
        name: "web_search",
        type: "TOOL_CALL" as const,
        status: "COMPLETED" as const,
        input: { query: "AgentLens observability platform" },
        output: { results_count: 10, top_result: "https://agentlens.vectry.tech" },
        durationMs: 2800,
        startedAt: start,
        endedAt: endDate(start, 2800),
      },
    ],
    events: [] as EventCreate[],
    decisions: [] as DecisionCreate[],
  };
}