Master the art of defining powerful AI agents in your workspace
agents:
  analyst:
    type: "llm"
    model: "claude-3-5-sonnet-20241022"
    purpose: "Analyze data and provide insights"
    prompts:
      system: "You are a data analyst..."
agents:
  chat:
    type: "system"
    agent: "conversation"
    config:
      model: "claude-3-5-sonnet-20241022"
      use_reasoning: true
agents:
  external-ai:
    type: "remote"
    config:
      protocol: "acp"
      endpoint: "https://api.example.com/agent"
      auth:
        type: "bearer"
        token_env: "API_TOKEN"
agents:
  my-agent:
    type: "llm"                            # Required: Agent type
    model: "claude-3-5-sonnet-20241022"    # Required: Model name
    purpose: "What this agent does"        # Required: Clear purpose
    prompts:                               # Required: Instructions
      system: "Agent instructions..."
agents:
  advanced-agent:
    # Identity
    type: "llm"
    model: "claude-3-5-sonnet-20241022"
    description: "Detailed description for documentation"
    purpose: "Concise statement of purpose"

    # Prompts
    prompts:
      system: |
        Detailed system instructions that define:
        - Agent's role and expertise
        - Communication style
        - Constraints and guidelines
        - Output format expectations
      user: |
        Optional user prompt template with {variables}
        that can be filled during execution

    # Model Configuration
    config:
      # Model parameters
      temperature: 0.7        # 0.0-1.0, controls randomness
      max_tokens: 2000        # Maximum response length
      top_p: 0.9              # Nucleus sampling threshold
      frequency_penalty: 0.1  # Reduce repetition
      presence_penalty: 0.1   # Encourage topic diversity

      # Provider configuration
      provider: "anthropic"              # or "openai", "google", etc.
      api_key_env: "ANTHROPIC_API_KEY"   # Environment variable

      # Operational settings
      timeout: "120s"           # Maximum execution time
      retry_on_failure: true    # Automatic retry
      max_retries: 3            # Number of retries

      # Memory settings
      memory_enabled: true      # Access workspace memory
      memory_scope: "session"   # "agent", "session", or "workspace"
      context_window: 10        # Recent memories to include

      # Reasoning mode
      use_reasoning: true       # Enable step-by-step reasoning
      max_reasoning_steps: 15   # Maximum reasoning iterations

      # Caching
      cache_enabled: true       # Cache responses
      cache_ttl: "3600s"        # Cache duration

    # Tool access
    tools: ["web-search", "calculator", "file-reader"]

    # Permissions (optional)
    permissions:
      allow_net: ["api.example.com"]
      allow_read: ["./data"]
      deny_write: ["./"]
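As a rough illustration of how tool access and permissions combine, the sketch below limits a file-reading agent to a single data directory. The agent name and paths are illustrative, and the exact permission semantics may vary with your runtime.

agents:
  report-reader:                        # illustrative name
    type: "llm"
    model: "claude-3-5-haiku-20241022"
    purpose: "Summarize reports stored in the data directory"
    prompts:
      system: "Summarize the requested report concisely."
    tools: ["file-reader"]              # only the tool it actually needs
    permissions:
      allow_read: ["./data"]            # reads limited to ./data
      deny_write: ["./"]                # never writes to the workspace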
# Anthropic
# Fast, efficient, cost-effective
model: "claude-3-5-haiku-20241022"
# Use for: Simple tasks, quick responses, high volume

# Balanced performance (recommended for most tasks)
model: "claude-3-5-sonnet-20241022"
# Use for: General purpose, complex reasoning, coding

# Maximum capability
model: "claude-3-opus-20240229"
# Use for: Critical decisions, complex analysis, creative work

# OpenAI
# Legacy, fast
model: "gpt-3.5-turbo"

# Current generation
model: "gpt-4"
model: "gpt-4-turbo"

# Specialized
model: "gpt-4-vision-preview"    # Image understanding

# Google
model: "gemini-pro"
model: "gemini-ultra"

# Mistral
model: "mistral-tiny"
model: "mistral-small"
model: "mistral-medium"

# Local models (Ollama)
model: "ollama:llama2"
model: "ollama:mistral"
model: "ollama:codellama"
agents:
  support-chat:
    type: "system"
    agent: "conversation"
    config:
      model: "claude-3-5-sonnet-20241022"
      system_prompt: |
        You are a friendly support agent.
        Help users with their questions.

      # Conversation features
      memory_enabled: true
      max_conversation_length: 100
      streaming: true

      # Advanced reasoning
      use_reasoning: true
      max_reasoning_steps: 10

    # Tools for the conversation
    tools: ["search-docs", "file-reader"]
agents:
  # Memory management
  memory-manager:
    type: "system"
    agent: "memory"
    config:
      operation_mode: "optimize"
      retention_days: 30

  # Content synthesis
  synthesizer:
    type: "system"
    agent: "synthesizer"
    config:
      synthesis_mode: "comprehensive"
      output_format: "markdown"
agents:
  remote-analyst:
    type: "remote"
    description: "External analysis service"
    config:
      protocol: "acp"
      endpoint: "https://analysis-api.example.com/v1/agent"

      # Authentication
      auth:
        type: "bearer"
        token_env: "ANALYSIS_API_TOKEN"

      # ACP-specific settings
      agent_name: "data-analyzer"
      default_mode: "sync"    # or "async"
      timeout: "60s"

      # Health checks
      health_check_interval: "30s"
      health_check_endpoint: "/health"

      # Schema validation
      schema:
        validate_input: true
        validate_output: true
        input:
          type: "object"
          properties:
            data: { type: "array" }
            analysis_type: { type: "string" }
          required: ["data"]
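For reference, an input payload that satisfies this schema would look like the following (values are illustrative, shown here as YAML for readability). Omitting data would fail validation, since it is the only required property.

# Example payload for remote-analyst
data: [12.5, 13.1, 14.8, 15.2]    # required array
analysis_type: "trend"            # optional string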
agents:
  rest-service:
    type: "remote"
    config:
      protocol: "rest"
      base_url: "https://api.service.com"

      # Endpoints
      endpoints:
        execute: "/execute"
        status: "/status/{task_id}"
        result: "/result/{task_id}"

      # Authentication
      auth:
        type: "api_key"
        header: "X-API-Key"
        key_env: "SERVICE_API_KEY"

      # Request configuration
      headers:
        "Content-Type": "application/json"
        "User-Agent": "Atlas/1.0"

      # Retry policy
      retry:
        max_attempts: 3
        backoff: "exponential"
        initial_delay: "1s"
agents:
  # Legal expert
  legal-advisor:
    type: "llm"
    model: "claude-3-5-sonnet-20241022"
    purpose: "Provide legal analysis and guidance"
    prompts:
      system: |
        You are a legal expert specializing in:
        - Contract law
        - Intellectual property
        - Corporate compliance

        Important: Always include disclaimers about
        not providing official legal advice.
    config:
      temperature: 0.2    # Low for accuracy

  # Financial analyst
  financial-analyst:
    type: "llm"
    model: "claude-3-5-sonnet-20241022"
    purpose: "Analyze financial data and trends"
    prompts:
      system: |
        You are a CFA with expertise in:
        - Financial modeling
        - Risk assessment
        - Market analysis

        Always show calculations and cite sources.
    tools: ["calculator", "market-data-api"]
agents:
  # Research lead
  lead-researcher:
    type: "llm"
    model: "claude-3-5-sonnet-20241022"
    purpose: "Coordinate research and delegate tasks"
    prompts:
      system: |
        You lead a research team. Your role:
        1. Break down research questions
        2. Identify what each specialist should investigate
        3. Synthesize findings into insights

  # Subject specialist
  domain-expert:
    type: "llm"
    model: "claude-3-5-sonnet-20241022"
    purpose: "Provide deep domain expertise"
    prompts:
      system: |
        You are a subject matter expert.
        Provide detailed, accurate information
        in your domain. Always cite sources.

  # Fact checker
  verifier:
    type: "llm"
    model: "claude-3-5-haiku-20241022"
    purpose: "Verify facts and check sources"
    prompts:
      system: |
        You verify information accuracy.
        Check facts, validate sources,
        and identify potential biases.
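One way to wire this team into a pipeline is a job that runs the lead, the specialist, and the verifier off the same trigger, reusing the signals/jobs structure shown in the testing example further down. The signal name, and the assumption that agents run in the order listed, are illustrative.

signals:
  research-request:
    provider: "cli"                  # illustrative signal

jobs:
  research-pipeline:
    triggers:
      - signal: "research-request"
    execution:
      agents:
        - id: "lead-researcher"
        - id: "domain-expert"
        - id: "verifier"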
agents:
  adaptive-assistant:
    type: "llm"
    model: "claude-3-5-sonnet-20241022"
    purpose: "Adapt communication style to user needs"
    prompts:
      system: |
        You adapt your communication based on:

        Technical users:
        - Use precise terminology
        - Provide implementation details
        - Include code examples

        Business users:
        - Focus on outcomes and ROI
        - Use clear, non-technical language
        - Provide executive summaries

        Beginners:
        - Explain concepts simply
        - Use analogies and examples
        - Encourage questions
      user: |
        User type: {user_type}
        Query: {query}
agents:
  quality-checker:
    type: "llm"
    model: "claude-3-5-sonnet-20241022"
    purpose: "Review and improve content quality"
    prompts:
      system: |
        You are a quality control specialist.
        Review content for:
        1. Accuracy - Verify all facts
        2. Clarity - Ensure easy understanding
        3. Completeness - Check nothing is missing
        4. Consistency - Maintain uniform style
        5. Compliance - Meet requirements

        Provide specific improvement suggestions.
    config:
      temperature: 0.1    # Very consistent
# Good - Specific purpose
purpose: "Analyze customer feedback and identify improvement areas"
# Too vague
purpose: "Help with stuff"
prompts:
  system: |
    # Role
    You are a [specific role] with expertise in [domains].

    # Objectives
    Your primary goals are:
    1. [Goal 1]
    2. [Goal 2]

    # Constraints
    Always:
    - [Requirement 1]
    - [Requirement 2]

    Never:
    - [Restriction 1]
    - [Restriction 2]

    # Output Format
    Structure responses as:
    - [Format specification]
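As an illustration, here is the same template filled in for a hypothetical code-review agent; the role, goals, and constraints are examples, not requirements.

prompts:
  system: |
    # Role
    You are a senior code reviewer with expertise in Python and API design.

    # Objectives
    Your primary goals are:
    1. Catch correctness and security issues before merge
    2. Suggest simpler, more idiomatic alternatives

    # Constraints
    Always:
    - Quote the exact lines you comment on
    - Explain why a change matters

    Never:
    - Approve code you have not read in full
    - Rewrite code wholesale unless asked

    # Output Format
    Structure responses as:
    - A short summary followed by a numbered list of findings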
# Analytical tasks (low creativity needed)
config:
  temperature: 0.2    # use 0.1-0.3

# Balanced tasks
config:
  temperature: 0.6    # use 0.5-0.7

# Creative tasks
config:
  temperature: 0.9    # use 0.8-1.0
config:
  # Token limits
  max_tokens: 1000          # Don't waste tokens
  max_input_tokens: 4000    # Prevent context overflow

  # Timeouts
  timeout: "30s"            # Fail fast

  # Caching
  cache_enabled: true       # Reuse responses
  cache_ttl: "1h"           # Appropriate TTL
config:
  # Retry strategy
  retry_on_failure: true
  max_retries: 3
  retry_delay: "2s"

  # Fallback options
  fallback_model: "claude-3-5-haiku-20241022"
  fallback_temperature: 0.3
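Shown in isolation above, these keys belong inside an individual agent's config block; a minimal sketch with an illustrative agent name:

agents:
  summarizer:                          # illustrative name
    type: "llm"
    model: "claude-3-5-sonnet-20241022"
    purpose: "Summarize incoming reports"
    prompts:
      system: "Summarize each report in five bullet points."
    config:
      retry_on_failure: true
      max_retries: 3
      retry_delay: "2s"
      fallback_model: "claude-3-5-haiku-20241022"
      fallback_temperature: 0.3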
# test-agent.yml
agents:
  test-agent:
    type: "llm"
    model: "claude-3-5-haiku-20241022"
    purpose: "Test agent behavior"
    prompts:
      system: "You are a test agent. Echo back the input."

signals:
  test:
    provider: "cli"

jobs:
  test-job:
    triggers:
      - signal: "test"
    execution:
      agents:
        - id: "test-agent"
# Test normal input
atlas signal trigger test --data '{"input": "Normal test"}'
# Test edge cases
atlas signal trigger test --data '{"input": ""}'
atlas signal trigger test --data '{"input": "Very long input..."}'
config:
  # Enable metrics
  metrics_enabled: true
  log_token_usage: true
  log_response_time: true
# Bad - Too many responsibilities
agents:
  do-everything:
    purpose: "Research, analyze, write, edit, and publish"

# Good - Focused agents
agents:
  researcher:
    purpose: "Research topics thoroughly"
  writer:
    purpose: "Write engaging content"
prompts:
  system: |
    If you encounter errors:
    - Explain what went wrong
    - Suggest alternatives
    - Ask for clarification if needed
prompts:
  system: |
    When using tools:
    - Explain which tool you're using
    - Handle tool failures gracefully
    - Try alternatives if tools fail