Get started in minutes with any of the three projects.
```bash
# Point any HTTP client at the gateway instead of the provider
curl http://localhost:7680/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer $OPENAI_API_KEY" \
  -H "X-Majordomo-Key: your-majordomo-key" \
  -H "X-Majordomo-App-Name: my-app" \
  -H "X-Majordomo-User-Id: user-123" \
  -d '{
    "model": "gpt-4o",
    "messages": [{"role": "user", "content": "Hello!"}]
  }'

# Response includes cost headers:
# X-Majordomo-Input-Cost: 0.000125
# X-Majordomo-Output-Cost: 0.000250
# X-Majordomo-Total-Cost: 0.000375
```
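
The cost headers are plain response headers, so any HTTP client can read them. A minimal sketch with `requests`, reusing the endpoint, payload, and header names from the curl example above:

```python
import os

import requests

# Same request as the curl example, issued from Python.
resp = requests.post(
    "http://localhost:7680/v1/chat/completions",
    headers={
        "Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}",
        "X-Majordomo-Key": "your-majordomo-key",
        "X-Majordomo-App-Name": "my-app",
        "X-Majordomo-User-Id": "user-123",
    },
    json={"model": "gpt-4o", "messages": [{"role": "user", "content": "Hello!"}]},
)

# Read the per-request cost straight off the response headers.
print(resp.headers["X-Majordomo-Total-Cost"])
```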
```python
from pydantic import BaseModel

from majordomo_llm import get_llm_instance

llm = get_llm_instance(provider="openai", model="gpt-4o")

# Simple request with automatic cost tracking
response = await llm.get_response(
    system_prompt="You are a helpful assistant.",
    user_prompt="Explain async/await in Python.",
)
print(response.text)
print(f"Cost: ${response.total_cost:.6f}")

# Structured output: one API for every provider
class MovieReview(BaseModel):
    title: str
    rating: float
    summary: str

review = await llm.get_structured_json_response(
    system_prompt="You are a movie critic.",
    user_prompt="Review the movie Inception.",
    output_type=MovieReview,
)
print(f"{review.title}: {review.rating}/10")
```
```python
# Pydantic AI adapter
from pydantic_ai import Agent

from majordomo_frameworks.pydantic_ai import create_model, build_extra_headers

model = create_model(
    provider="openai",
    model_name="gpt-4o",
    gateway_url="http://localhost:7680",
)
agent = Agent(model)

result = await agent.run(
    "Summarize this document.",
    model_settings={
        "extra_headers": build_extra_headers(
            majordomo_key="your-key",
            app_name="my-agent",
        )
    },
)
```
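
To see the agent's reply, print the run result; which attribute holds it depends on your Pydantic AI version:

```python
# Recent Pydantic AI releases expose the reply as `result.output`;
# older releases used `result.data`.
print(result.output)
```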
```python
# Agno adapter
from agno.agent import Agent as AgnoAgent
from agno.models.openai import OpenAIChat

from majordomo_frameworks.agno import patch_model

model = patch_model(
    OpenAIChat(id="gpt-4o"),
    gateway_url="http://localhost:7680",
    majordomo_key="your-key",
)
agent = AgnoAgent(model=model)
```
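
To exercise the patched model end to end, run the agent as usual; a minimal sketch assuming Agno's standard synchronous `Agent.run()` entry point:

```python
# Assumes Agno's standard run() API; the prompt is illustrative.
run = agent.run("Summarize this document.")
print(run.content)
```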