Examples

Anthropic / Claude

The primary use case for Raison: manage your Claude system prompts without redeploying your application.

import Anthropic from "@anthropic-ai/sdk";
import { Raison } from "raison";

const raison = new Raison({ apiKey: process.env.RAISON_API_KEY });
const anthropic = new Anthropic();

/**
 * Renders the managed system prompt and sends a single-turn message to Claude.
 *
 * @param userMessage - The end user's message.
 * @param userId - Forwarded to the prompt template as a variable.
 * @returns The model's text reply, or "" if the response contains no text block.
 */
async function chat(userMessage: string, userId: string): Promise<string> {
  const systemPrompt = await raison.render("SYSTEM_PROMPT_ID", {
    userId,
    date: new Date().toISOString(),
    modelVersion: "claude-sonnet-4-6",
  });

  const response = await anthropic.messages.create({
    model: "claude-sonnet-4-6",
    max_tokens: 1024,
    system: systemPrompt,
    messages: [{ role: "user", content: userMessage }],
  });

  // The first content block is not guaranteed to be text (it can be e.g. a
  // tool_use block), so find the first text block rather than assuming index 0.
  const textBlock = response.content.find((block) => block.type === "text");
  return textBlock?.type === "text" ? textBlock.text : "";
}

OpenAI

import OpenAI from "openai";
import { Raison } from "raison";

const raison = new Raison({ apiKey: process.env.RAISON_API_KEY });
const openai = new OpenAI();

/**
 * Renders the managed system prompt and sends a single-turn chat completion.
 *
 * @param userMessage - The end user's message.
 * @returns The assistant's reply, or "" if the API returned no content.
 */
async function chat(userMessage: string): Promise<string> {
  const systemPrompt = await raison.render("SYSTEM_PROMPT_ID", {
    date: new Date().toISOString(),
  });

  const response = await openai.chat.completions.create({
    model: "gpt-4o",
    messages: [
      { role: "system", content: systemPrompt },
      { role: "user", content: userMessage },
    ],
  });

  // message.content is typed string | null — coalesce to "" so callers
  // always receive a string (matches the Anthropic example's contract).
  return response.choices[0].message.content ?? "";
}

Express.js

import { Raison } from "raison";
import express from "express";

// Create once at startup — the connection is reused across all requests
const raison = new Raison({ apiKey: process.env.RAISON_API_KEY });
const app = express();
app.use(express.json());

app.post("/chat", async (req, res) => {
  const { userId, message } = req.body;

  try {
    const systemPrompt = await raison.render("SYSTEM_PROMPT_ID", {
      userId,
      // NOTE(review): `req.user` is not on Express's Request type — this
      // assumes upstream auth middleware augments it; confirm in your app.
      userRole: req.user?.role ?? "guest",
    });

    if (!systemPrompt) {
      return res.status(500).json({ error: "System prompt not found" });
    }

    // Pass systemPrompt to your LLM...
    res.json({ systemPrompt });
  } catch (error) {
    // Express 4 does not forward rejected async handlers to error
    // middleware — without this catch a render failure hangs the request.
    console.error("Failed to render prompt:", error);
    res.status(500).json({ error: "Failed to render system prompt" });
  }
});

app.listen(3000);

Error Handling

Guard against missing prompts and handle failures gracefully:

import { Raison } from "raison";

const raison = new Raison({ apiKey: process.env.RAISON_API_KEY });

const FALLBACK_PROMPT = "You are a helpful assistant.";

/**
 * Renders the managed system prompt, substituting a static fallback when the
 * prompt is missing (render() returns "") or the render call throws.
 */
async function getSystemPrompt(variables: Record<string, unknown>) {
  try {
    const rendered = await raison.render("SYSTEM_PROMPT_ID", variables);
    if (rendered) {
      return rendered;
    }
    // Empty string means the prompt ID resolved to nothing — fall back.
    console.warn("System prompt not found, using fallback");
    return FALLBACK_PROMPT;
  } catch (error) {
    console.error("Failed to render prompt:", error);
    return FALLBACK_PROMPT;
  }
}

Multiple Environments

Run separate instances for different environments in the same process:

import { Raison } from "raison";

const dev = new Raison({ apiKey: process.env.RAISON_DEV_KEY! });
const prod = new Raison({ apiKey: process.env.RAISON_PROD_KEY! });

// Useful for canary deployments or A/B testing infrastructure
const clients = { dev, prod } as const;

/** Renders a prompt against the client for the requested environment. */
async function getPrompt(environment: "dev" | "prod", promptId: string) {
  return clients[environment].render(promptId, { env: environment });
}

Next.js / Serverless Singleton

In serverless environments, prevent creating a new connection per request by using a module-level singleton:

// lib/raison.ts
import { Raison } from "raison";

let singleton: Raison | null = null;

/** Lazily constructs the client on first call and reuses it thereafter. */
export function getRaison(): Raison {
  return (singleton ??= new Raison({ apiKey: process.env.RAISON_API_KEY! }));
}
// app/api/chat/route.ts (Next.js App Router)
import { getRaison } from "@/lib/raison";
import { NextResponse } from "next/server";

/**
 * Chat endpoint: renders the managed system prompt and (in a real app)
 * forwards it to your LLM alongside the user's message.
 */
export async function POST(request: Request) {
  const { message } = await request.json();
  const raison = getRaison();

  const systemPrompt = await raison.render("SYSTEM_PROMPT_ID", {
    date: new Date().toISOString(),
  });

  // render() returns "" for missing prompts — surface that as an error
  // instead of silently calling the LLM with an empty system prompt.
  if (!systemPrompt) {
    return NextResponse.json(
      { error: "System prompt not found" },
      { status: 500 },
    );
  }

  // ... call your LLM with systemPrompt
  return NextResponse.json({ systemPrompt });
}

Note: In Next.js development mode, hot module replacement may create multiple instances. This is harmless — each instance maintains its own WebSocket and in-memory store. In production, the singleton pattern ensures a single persistent connection.

Custom Helpers for AI Context

Register helpers at startup for common AI prompt patterns:

import { Raison } from "raison";

// Date formatting
Raison.registerHelper("formatDate", (iso: string) => {
  return new Date(iso).toLocaleDateString("en-US", {
    weekday: "long",
    year: "numeric",
    month: "long",
    day: "numeric",
  });
});

// JSON embedding (for structured context)
Raison.registerHelper("json", (obj: unknown) => JSON.stringify(obj, null, 2));

// Conditional text. A block helper receives the template context as `this`
// and an options object whose fn/inverse render the two branches. The `this`
// parameter and inline options type are required under strict TypeScript —
// the original referenced a `Handlebars` namespace that was never imported
// and left `this` implicitly typed.
Raison.registerHelper("tier", function (
  this: unknown,
  plan: string,
  options: { fn: (ctx: unknown) => string; inverse: (ctx: unknown) => string },
) {
  return plan === "enterprise" ? options.fn(this) : options.inverse(this);
});

const raison = new Raison({ apiKey: process.env.RAISON_API_KEY! });

// Template in the dashboard can now use:
// "Today is {{formatDate currentDate}}."
// "User data: {{json userData}}"
// "{{#tier plan}}Enterprise customer.{{else}}Standard customer.{{/tier}}"

Graceful Shutdown

import { Raison } from "raison";

const raison = new Raison({ apiKey: process.env.RAISON_API_KEY! });

// Disconnect and exit cleanly; shared by both termination signals.
const shutdown = (): void => {
  raison.disconnect();
  process.exit(0);
};

process.on("SIGTERM", shutdown);
process.on("SIGINT", shutdown);

Generic LLM Pattern

A reusable wrapper that works with any LLM:

import { Raison } from "raison";

const raison = new Raison({ apiKey: process.env.RAISON_API_KEY! });

interface ChatOptions {
  promptId: string;
  variables?: Record<string, unknown>;
  userMessage: string;
}

/**
 * Resolves the system prompt for a chat call, substituting a generic
 * default whenever the managed prompt renders empty or missing.
 */
async function buildSystemPrompt(options: ChatOptions): Promise<string> {
  const { promptId, variables } = options;
  const rendered = await raison.render(promptId, variables);
  if (rendered) {
    return rendered;
  }
  return "You are a helpful assistant.";
}

// Usage with any LLM SDK:
const system = await buildSystemPrompt({
  promptId: "SYSTEM_PROMPT_ID",
  variables: { userName: "Alice", date: new Date().toISOString() },
  userMessage: "How do I reset my password?",
});
// → pass `system` to your LLM's system message parameter