Installation
pip install rubric
Quick Start
from rubric import Rubric

# Initialize client
client = Rubric(api_key="gr_live_xxxxxxxx")

# Or use environment variable RUBRIC_API_KEY
client = Rubric()

# Log a call
client.calls.log(
    project="my-project",
    transcript=[...],
    ai_decision={...}
)

# Run an evaluation
evaluation = client.evaluations.create(
    name="Weekly Review",
    project="proj_abc123",
    dataset="ds_xyz789",
    evaluators=[{"type": "triage_accuracy"}]
)
Configuration
Client Options
client = Rubric(
    api_key="gr_live_xxxxxxxx",        # API key (or use RUBRIC_API_KEY env var)
    base_url="https://api.rubric.ai",  # Custom endpoint
    timeout=30.0,                      # Request timeout in seconds
    max_retries=3,                     # Retry failed requests
    http_client=None                   # Custom httpx client
)
Environment Variables
| Variable | Description |
|---|---|
| RUBRIC_API_KEY | API key for authentication |
| RUBRIC_BASE_URL | Custom API endpoint |
| RUBRIC_TIMEOUT | Default request timeout in seconds |
| RUBRIC_LOG_LEVEL | Logging verbosity (DEBUG, INFO, WARNING, ERROR) |
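When these variables are set, the client picks them up automatically, so deployment code can construct it with no arguments. A minimal sketch, assuming all four variables are read when the client is constructed (only RUBRIC_API_KEY is confirmed above; the others follow the same pattern):

import os

# In practice, set these in your shell or deployment config; shown inline for clarity
os.environ["RUBRIC_API_KEY"] = "gr_live_xxxxxxxx"
os.environ["RUBRIC_TIMEOUT"] = "30"
os.environ["RUBRIC_LOG_LEVEL"] = "INFO"

from rubric import Rubric

client = Rubric()  # no arguments needed; configuration comes from the environment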
Core Resources
Calls
Log and manage voice call data:
# Log a call
call = client.calls.log(
    project="patient-triage",
    audio_url="https://storage.example.com/call.wav",
    transcript=[
        {"speaker": "agent", "text": "How can I help?", "start": 0.0, "end": 1.5},
        {"speaker": "patient", "text": "I have chest pain", "start": 2.0, "end": 4.0}
    ],
    ai_decision={
        "triage_level": "urgent",
        "extracted_symptoms": ["chest_pain"]
    },
    metadata={
        "call_id": "call_12345",
        "duration_seconds": 180
    }
)
print(f"Logged: {call.id}")

# Retrieve a call
call = client.calls.get("call_abc123")

# List calls with filters
calls = client.calls.list(
    project="patient-triage",
    created_after="2024-01-01",
    status="pending_review",
    limit=50
)
Notes
Log clinical documentation:
# Log a clinical note
note = client.notes.log(
    project="visit-summarizer",
    input_text="Patient presents with...",
    output={
        "soap_note": {
            "subjective": "...",
            "objective": "...",
            "assessment": "...",
            "plan": "..."
        },
        "icd_codes": ["E11.9"]
    },
    expected={
        "icd_codes": ["E11.65"]
    }
)
Imaging
Log medical imaging AI outputs:
# Log imaging analysis
study = client.imaging.log(
    project="chest-xray-analyzer",
    study_uid="1.2.840.113619.2.55...",
    dicom_metadata={
        "modality": "CR",
        "body_part": "CHEST"
    },
    ai_analysis={
        "findings": [...],
        "impression": "..."
    }
)
Datasets
Manage datasets for evaluation:
# Create a dataset
dataset = client.datasets.create(
    name="triage-test-set-v2",
    description="Gold standard triage cases",
    project="patient-triage"
)

# Add samples
client.datasets.add_samples(
    dataset_id=dataset.id,
    samples=[
        {
            "input": {"transcript": "..."},
            "expected": {"triage_level": "urgent"}
        }
    ]
)

# List datasets
datasets = client.datasets.list(project="patient-triage")
Evaluations
Run and manage evaluations:
# Create evaluation
evaluation = client.evaluations.create(
    name="Weekly Triage Review",
    project="proj_abc123",
    dataset="ds_xyz789",
    evaluators=[
        {
            "type": "triage_accuracy",
            "config": {
                "severity_weights": {"under_triage": 5.0}
            }
        }
    ]
)

# Check status
evaluation = client.evaluations.get(evaluation.id)
print(f"Status: {evaluation.status}")
print(f"Progress: {evaluation.progress.completed}/{evaluation.progress.total}")

# Get results
results = client.evaluations.get_results(evaluation.id)
for evaluator in results.evaluators:
    print(f"{evaluator.name}: {evaluator.score}%")

# List evaluations
evaluations = client.evaluations.list(project="proj_abc123")

# Compare evaluations
comparison = client.evaluations.compare([
    "eval_week1", "eval_week2", "eval_week3"
])
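Evaluations run asynchronously on the server, so scripts typically poll until a run reaches a terminal status. A minimal sketch; the wait_for_evaluation helper and its five-second interval are illustrative, not part of the SDK (the "completed"/"failed" statuses match the async example below):

import time

def wait_for_evaluation(client, evaluation_id, poll_seconds=5.0):
    # Poll until the evaluation reaches a terminal status
    while True:
        evaluation = client.evaluations.get(evaluation_id)
        if evaluation.status in ["completed", "failed"]:
            return evaluation
        time.sleep(poll_seconds)

evaluation = wait_for_evaluation(client, evaluation.id)
print(f"Final status: {evaluation.status}")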
Async Support
The SDK provides async versions of all methods:
import asyncio
from rubric import AsyncRubric

async def main():
    client = AsyncRubric()

    # Async logging
    call = await client.calls.log(
        project="my-project",
        transcript=[...],
        ai_decision={...}
    )

    # Async evaluation
    evaluation = await client.evaluations.create(
        name="Async Eval",
        project="proj_abc123",
        dataset="ds_xyz789",
        evaluators=[{"type": "triage_accuracy"}]
    )

    # Wait for completion
    while True:
        evaluation = await client.evaluations.get(evaluation.id)
        if evaluation.status in ["completed", "failed"]:
            break
        await asyncio.sleep(5)

asyncio.run(main())
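Since the async client mirrors the sync API, independent requests can also be fanned out concurrently. A sketch using asyncio.gather; the log_batch helper and the record shape are illustrative:

import asyncio
from rubric import AsyncRubric

async def log_batch(records):
    client = AsyncRubric()
    # Fire all log requests at once instead of awaiting them one by one
    return await asyncio.gather(*[
        client.calls.log(
            project="my-project",
            transcript=record["transcript"],
            ai_decision=record["ai_decision"]
        )
        for record in records
    ])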
Error Handling
from rubric import Rubric
from rubric.exceptions import (
    RubricException,
    AuthenticationError,
    RateLimitError,
    ValidationError,
    NotFoundError
)

client = Rubric()

try:
    evaluation = client.evaluations.get("eval_nonexistent")
except NotFoundError as e:
    print(f"Evaluation not found: {e.message}")
except RateLimitError as e:
    print(f"Rate limited. Retry after: {e.retry_after} seconds")
except AuthenticationError as e:
    print(f"Auth failed: {e.message}")
except ValidationError as e:
    print(f"Invalid request: {e.message}")
    print(f"Field: {e.param}")
except RubricException as e:
    print(f"Rubric error: {e.message}")
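The client already retries transient failures up to max_retries, but long-running jobs may want their own backoff around rate limits, driven by the retry_after hint on RateLimitError. A sketch; get_with_backoff is an illustrative helper, and it falls back to a fixed delay in case retry_after is not populated:

import time

from rubric.exceptions import RateLimitError

def get_with_backoff(client, evaluation_id, attempts=3):
    # Retry reads that hit the rate limit, honoring the server's retry_after hint
    for attempt in range(attempts):
        try:
            return client.evaluations.get(evaluation_id)
        except RateLimitError as e:
            if attempt == attempts - 1:
                raise
            time.sleep(e.retry_after or 1.0)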
Pagination
# Manual pagination
page = client.calls.list(project="my-project", limit=100)
all_calls = list(page.data)

while page.has_more:
    page = client.calls.list(
        project="my-project",
        limit=100,
        after=page.data[-1].id
    )
    all_calls.extend(page.data)

# Auto-pagination iterator
for call in client.calls.list_auto(project="my-project"):
    process(call)
Webhooks
# Register a webhook
webhook = client.webhooks.create(
    url="https://your-app.com/webhooks/rubric",
    events=["evaluation.completed", "review.submitted"],
    secret="your_webhook_secret"
)

# Verify webhook signature (in your endpoint)
import json

from rubric.webhooks import verify_signature

WEBHOOK_SECRET = "your_webhook_secret"  # the secret registered above

@app.post("/webhooks/rubric")
def handle_webhook(request):
    signature = request.headers.get("X-Rubric-Signature")
    payload = request.body  # verify the raw body, before any parsing
    if verify_signature(payload, signature, WEBHOOK_SECRET):
        event = json.loads(payload)
        # Handle event
    # Otherwise reject the request (e.g. respond with 401)
Type Hints
The SDK is fully typed for IDE support:
from rubric import Rubric
from rubric.types import (
    Call,
    Evaluation,
    EvaluationStatus,
    EvaluatorConfig,
    TriageLevel
)

client = Rubric()

def process_call(call: Call) -> None:
    if call.ai_decision.triage_level == TriageLevel.EMERGENT:
        alert_team(call)

evaluation: Evaluation = client.evaluations.get("eval_123")
if evaluation.status == EvaluationStatus.COMPLETED:
    print(f"Score: {evaluation.results.overall_score}")
