TinyAgent Documentation
Complete guide to building intelligent agents with three-tier architecture, 20 built-in tools, enterprise security, and multi-provider flexibility.
Getting Started
Installation
# Requirements: Python 3.10+
pip install tinyagent-py
# Required dependencies (automatically installed):
# rich>=14.0.0, cloudpickle>=3.0.0, jinja2>=3.1.0,
# pyyaml>=6.0.0, modal>=1.1.0 (for TinyCodeAgent)
# Verify installation
python -c "import tinyagent; print(tinyagent.__version__)"
Basic Usage
import asyncio
from tinyagent import TinyAgent

async def main():
    # Initialize TinyAgent with your preferred model
    agent = TinyAgent(
        model="gpt-4o-mini",  # or "claude-3-5-sonnet", "gpt-4o", etc.
        api_key="your-api-key"
    )
    try:
        result = await agent.run("Hello, TinyAgent!")
        print(result)
    finally:
        await agent.close()

asyncio.run(main())
Three-Tier Architecture
TinyAgent's three-tier architecture splits work across specialized roles: a main orchestrator, a dedicated coding agent, and parallel subagents for distributed task execution.
Tier 1: TinyAgent
Main orchestrator and decision maker. Coordinates tasks and manages overall workflow.
Tier 2: TinyCodeAgent
Specialized coding assistant for development tasks with enhanced code understanding.
Tier 3: Subagent Swarm
Parallel workers for distributed task execution and specialized operations.
Implementation Example
import asyncio
from tinyagent import TinyAgent
from tinyagent.tools.subagent import create_general_subagent, create_coding_subagent

async def main():
    # Tier 1: TinyAgent - Main orchestrator
    main_agent = TinyAgent(
        model="gpt-4o",
        api_key="your-api-key",
        enable_todo_write=True  # Built-in task tracking
    )

    # Tier 2: TinyCodeAgent - Specialized coding assistant
    code_agent = create_coding_subagent(
        "coder",
        model="claude-3-5-sonnet",
        max_turns=25
    )

    # Tier 3: Subagent Swarm - Parallel task workers
    research_agent = create_general_subagent(
        "researcher",
        model="gpt-4o-mini",
        max_turns=20
    )

    # Add subagents to the main agent
    main_agent.add_tool(code_agent)
    main_agent.add_tool(research_agent)

    try:
        result = await main_agent.run("""
        Complex project workflow:
        1. Research AI trends 2024
        2. Code a data analysis script
        3. Track progress with todos
        4. Generate a comprehensive report

        Use the coding subagent for implementation and
        the research subagent for information gathering.
        """)
        print(result)
    finally:
        await main_agent.close()

asyncio.run(main())
20 Built-in Tools
TinyAgent comes with a comprehensive toolkit for file operations, web scraping, code execution, data processing, and more.
# TinyAgent comes with 20 built-in tools:
# File Operations
- read_file, write_file, edit_file
- list_files, create_directory
- file_search, glob_search
# Code & Development
- python_execute, shell_execute
- git_operations, code_analysis
# Web & Network
- web_fetch, web_search
- http_request, api_call
# Data Processing
- json_parse, csv_operations
- data_transform, regex_operations
# Task Management
- todo_write, task_tracking
- progress_monitor
# Subagent Tools
- subagent_create, subagent_delegate
- swarm_coordinate
# Storage & Memory
- session_storage, memory_store
- context_persist
# And many more...
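You rarely call these tools directly; they are registered with the agent, and the model chooses which ones to invoke while completing a request. Below is a minimal sketch of driving the built-in tools through a prompt; the file path and task are illustrative, and exact tool availability depends on your TinyAgent version and configuration.

import asyncio
from tinyagent import TinyAgent

async def main():
    agent = TinyAgent(
        model="gpt-4o-mini",
        api_key="your-api-key",
        enable_todo_write=True  # expose the todo_write task-tracking tool
    )
    try:
        # The model decides which built-in tools to call (file reads,
        # web fetches, todo updates, ...) based on the request.
        result = await agent.run(
            "Read ./README.md, summarize it, and track your steps as todos."
        )
        print(result)
    finally:
        await agent.close()

asyncio.run(main())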
Enterprise Features
Security & Authentication
API key management, rate limiting, audit logging, and secure storage
Monitoring & Analytics
Performance tracking, cost monitoring, and usage analytics
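Usage analytics can be attached through the same callback hook used by the built-in UI and caching helpers (see the implementation below). Here is a hedged sketch of a custom usage monitor, assuming callbacks are async callables invoked as callback(event_name, agent, **kwargs); check tinyagent.hooks for the exact contract.

import asyncio
from collections import Counter
from tinyagent import TinyAgent

class UsageMonitor:
    # Assumed hook protocol: awaited as monitor(event_name, agent, **kwargs).
    def __init__(self):
        self.events = Counter()

    async def __call__(self, event_name, agent, **kwargs):
        self.events[event_name] += 1  # count events for simple usage analytics

async def main():
    monitor = UsageMonitor()
    agent = TinyAgent(model="gpt-4o-mini", api_key="your-api-key")
    agent.add_callback(monitor)  # same hook point as RichUICallback below
    try:
        await agent.run("Quick status check")
    finally:
        await agent.close()
    print(dict(monitor.events))  # e.g. how many model calls the run made

asyncio.run(main())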
Enterprise Implementation
import asyncio
from tinyagent import TinyAgent
from tinyagent.storage import JsonFileStorage, DatabaseStorage
from tinyagent.hooks.rich_ui_callback import RichUICallback
from tinyagent.hooks import anthropic_prompt_cache
from tinyagent.security import APIKeyManager, RateLimiter

async def main():
    # Enterprise-grade storage (None by default)
    storage = DatabaseStorage("postgresql://user:pass@localhost/agents")
    # Alternative: JsonFileStorage for file-based storage
    # storage = JsonFileStorage("./sessions")

    # Rich terminal UI
    ui = RichUICallback(markdown=True, show_thinking=True)

    # Cost optimization with prompt caching
    cache_callback = anthropic_prompt_cache()

    # Security features
    api_manager = APIKeyManager()
    rate_limiter = RateLimiter(requests_per_minute=100)

    agent = TinyAgent(
        model="claude-3-5-sonnet-20241022",
        api_key=api_manager.get_key("anthropic"),
        storage=storage,
        session_id="enterprise_session",
        enable_todo_write=True
    )

    # Add all callbacks and security hooks
    agent.add_callback(cache_callback)
    agent.add_callback(ui)
    agent.add_callback(rate_limiter)

    try:
        result = await agent.run("Enterprise task processing")
        print(result)
    finally:
        await agent.close()

asyncio.run(main())
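For smaller deployments, the commented-out JsonFileStorage path above is often enough: persist sessions to disk and reuse the same session_id to resume work later. A brief sketch under that assumption (the directory and session_id are illustrative):

import asyncio
from tinyagent import TinyAgent
from tinyagent.storage import JsonFileStorage

async def main():
    # Assumption: sessions are keyed by session_id, so reusing the id with
    # the same storage backend picks up the stored conversation state.
    storage = JsonFileStorage("./sessions")
    agent = TinyAgent(
        model="gpt-4o-mini",
        api_key="your-api-key",
        storage=storage,
        session_id="support_ticket_1234"
    )
    try:
        print(await agent.run("Summarize where we left off."))
    finally:
        await agent.close()

asyncio.run(main())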
Multi-Provider Support
Switch between OpenAI, Anthropic, Google, and custom or local providers without changing your code; the provider is selected automatically from the model name (via LiteLLM).
# Multi-provider support via environment variables or direct API keys
import os
from tinyagent import TinyAgent

# OpenAI models
openai_agent = TinyAgent(
    model="gpt-4o-mini",
    api_key=os.getenv("OPENAI_API_KEY")
)

# Anthropic models
claude_agent = TinyAgent(
    model="claude-3-5-sonnet-20241022",
    api_key=os.getenv("ANTHROPIC_API_KEY")
)

# Google models
gemini_agent = TinyAgent(
    model="gemini-1.5-pro",
    api_key=os.getenv("GOOGLE_API_KEY")
)

# Custom/local models (via LiteLLM)
custom_agent = TinyAgent(
    model="custom/your-model",
    api_key="your-custom-key",
    base_url="https://your-api-endpoint.com/v1"
)

# Agents automatically use the correct provider based on the model name
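Because the provider is inferred from the model name, a simple per-task routing table is often all you need on top of this. A sketch (the task labels and mapping are illustrative, not part of TinyAgent):

import os
from tinyagent import TinyAgent

# Illustrative routing table: a cheap model for chat, a stronger one for coding.
MODEL_FOR_TASK = {
    "chat": ("gpt-4o-mini", "OPENAI_API_KEY"),
    "coding": ("claude-3-5-sonnet-20241022", "ANTHROPIC_API_KEY"),
}

def make_agent(task: str) -> TinyAgent:
    model, key_env = MODEL_FOR_TASK[task]
    # Provider selection still happens automatically from the model name.
    return TinyAgent(model=model, api_key=os.getenv(key_env))

coding_agent = make_agent("coding")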