Supported Providers

Rubric integrates with major LLM providers to evaluate your AI models in production.
| Provider | Models | Integration Method |
|---|---|---|
| OpenAI | GPT-4, GPT-4 Turbo, GPT-3.5 | API Key |
| Anthropic | Claude 3, Claude 2 | API Key |
| Azure OpenAI | GPT-4, GPT-3.5 (Azure-hosted) | Azure AD / API Key |
| Google Vertex AI | Gemini Pro, PaLM 2 | Service Account |
| AWS Bedrock | Claude, Titan, Llama 2 | IAM Role |
| Custom Endpoints | Any OpenAI-compatible API | API Key / OAuth |

OpenAI Integration

openai_integration.py
from rubric import Rubric

client = Rubric(api_key="gr_live_xxxxxxxx")

# Configure OpenAI integration
client.integrations.llm.configure(
    provider="openai",
    api_key="sk-xxxxxxxx",  # Stored encrypted

    # Optional: Organization ID
    organization_id="org-xxxxxxxx",

    # Optional: Custom base URL for proxies
    base_url="https://api.openai.com/v1"
)

# Log calls with automatic model detection
client.calls.log(
    project="patient-triage",
    model="gpt-4-turbo",
    input={"messages": [...]},
    output={"response": "..."}
)
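
In practice you will usually make the provider call with the official openai SDK and log both sides of the exchange. A minimal sketch, assuming the openai Python package and keys supplied via environment variables; the wrapper pattern here is illustrative, not part of the Rubric SDK:
openai_logging.py
import os

from openai import OpenAI
from rubric import Rubric

# Keys come from the environment, never from source code
openai_client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
rubric = Rubric(api_key=os.environ["RUBRIC_API_KEY"])

messages = [{"role": "user", "content": "Summarize the patient's intake notes."}]

# Make the real call...
response = openai_client.chat.completions.create(
    model="gpt-4-turbo",
    messages=messages,
)

# ...then log the exchange to Rubric
rubric.calls.log(
    project="patient-triage",
    model="gpt-4-turbo",
    input={"messages": messages},
    output={"response": response.choices[0].message.content},
)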

Anthropic Integration

anthropic_integration.py
from rubric import Rubric

client = Rubric(api_key="gr_live_xxxxxxxx")

# Configure Anthropic integration
client.integrations.llm.configure(
    provider="anthropic",
    api_key="sk-ant-xxxxxxxx"
)

# Log Claude calls
client.calls.log(
    project="symptom-checker",
    model="claude-3-opus",
    input={"prompt": "..."},
    output={"completion": "..."}
)
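
The same pattern works with the official anthropic SDK. A minimal sketch, assuming the anthropic Python package; the prompt and completion field names follow the calls.log example above:
anthropic_logging.py
import os

import anthropic
from rubric import Rubric

claude = anthropic.Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])
rubric = Rubric(api_key=os.environ["RUBRIC_API_KEY"])

prompt = "List red-flag symptoms that should escalate to urgent care."

# Call Claude, then log the exchange
message = claude.messages.create(
    model="claude-3-opus-20240229",
    max_tokens=1024,
    messages=[{"role": "user", "content": prompt}],
)

rubric.calls.log(
    project="symptom-checker",
    model="claude-3-opus",
    input={"prompt": prompt},
    output={"completion": message.content[0].text},
)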

Azure OpenAI Integration

azure_openai_integration.py
from rubric import Rubric

client = Rubric(api_key="gr_live_xxxxxxxx")

# Configure Azure OpenAI
client.integrations.llm.configure(
    provider="azure_openai",

    # Azure-specific settings
    azure_endpoint="https://your-resource.openai.azure.com",
    api_version="2024-02-15-preview",

    # Authentication (choose one)
    api_key="xxxxxxxx",
    # Or use Azure AD
    # use_azure_ad=True,
    # tenant_id="your-tenant-id"
)
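
Azure OpenAI serves models through named deployments, so when logging calls the model value is typically your deployment name rather than the base model id. A short sketch; "gpt4-prod" is a hypothetical deployment name from your Azure resource:
azure_logging.py
# Log a call made against an Azure deployment
client.calls.log(
    project="patient-triage",
    model="gpt4-prod",  # your deployment name, not "gpt-4"
    input={"messages": [...]},
    output={"response": "..."}
)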

Google Vertex AI Integration

vertex_ai_integration.py
from rubric import Rubric

client = Rubric(api_key="gr_live_xxxxxxxx")

# Configure Vertex AI
client.integrations.llm.configure(
    provider="vertex_ai",

    # GCP settings
    project_id="your-gcp-project",
    location="us-central1",

    # Authentication
    credentials_path="/path/to/service-account.json"
    # Or use default credentials
    # use_default_credentials=True
)
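
Google tooling conventionally locates service-account credentials via the GOOGLE_APPLICATION_CREDENTIALS environment variable, and you can follow the same convention here instead of hardcoding a path. A sketch, assuming the project id is also supplied through the environment (GCP_PROJECT_ID is our naming, not Rubric's):
vertex_ai_env.py
import os

# Reuse the standard GCP credentials convention rather than a literal path
client.integrations.llm.configure(
    provider="vertex_ai",
    project_id=os.environ["GCP_PROJECT_ID"],
    location="us-central1",
    credentials_path=os.environ["GOOGLE_APPLICATION_CREDENTIALS"],
)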

AWS Bedrock Integration

bedrock_integration.py
from rubric import Rubric

client = Rubric(api_key="gr_live_xxxxxxxx")

# Configure AWS Bedrock
client.integrations.llm.configure(
    provider="bedrock",

    # AWS settings
    region="us-east-1",

    # Authentication
    aws_access_key_id="AKIA...",
    aws_secret_access_key="...",
    # Or use IAM role
    # use_iam_role=True
)
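
When the workload runs on AWS infrastructure, prefer use_iam_role=True so no long-lived keys exist at all. If you must use static credentials, pull them from the standard AWS environment variables rather than source code. A minimal sketch:
bedrock_env.py
import os

# Static credentials from the standard AWS environment variables
client.integrations.llm.configure(
    provider="bedrock",
    region=os.environ.get("AWS_REGION", "us-east-1"),
    aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"],
    aws_secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"],
)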

Custom Endpoints

Connect to any OpenAI-compatible API:
custom_endpoint.py
from rubric import Rubric

client = Rubric(api_key="gr_live_xxxxxxxx")

# Configure custom LLM endpoint
client.integrations.llm.configure(
    provider="custom",
    name="internal-medical-llm",

    # Endpoint configuration
    base_url="https://llm.internal.company.com/v1",

    # Authentication
    auth_type="bearer",
    api_key="your-internal-key",

    # Optional: Custom headers
    headers={
        "X-Internal-Service": "rubric-eval"
    },

    # Model mapping
    model_aliases={
        "medical-gpt": "internal-medical-v2"
    }
)
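
With the alias in place, you can log calls against the friendly name. A sketch, assuming Rubric resolves model_aliases at logging time; the project name is illustrative:
custom_alias_logging.py
# "medical-gpt" resolves to "internal-medical-v2" via model_aliases above
client.calls.log(
    project="clinical-notes",
    model="medical-gpt",
    input={"messages": [...]},
    output={"response": "..."}
)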

Best Practices

| Practice | Rationale |
|---|---|
| Use environment variables | Never hardcode API keys in source code (see the sketch below) |
| Rotate keys regularly | Minimize exposure from compromised keys |
| Set up usage alerts | Monitor for unexpected API usage spikes |
| Use separate keys per environment | Isolate production from development |
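
The first and last rows combine naturally: keep one key per environment, and keep it in the environment itself. A sketch; the variable names are our convention, not Rubric's:
env_keys.py
import os

from rubric import Rubric

# One Rubric key per environment, injected by the deployment environment
# (a live key in production, a separate key in staging and development)
client = Rubric(api_key=os.environ["RUBRIC_API_KEY"])

client.integrations.llm.configure(
    provider="openai",
    api_key=os.environ["OPENAI_API_KEY"],  # rotates without a code change
)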