Build Your First AI Agent from Scratch - Part 2: Creating the Basic Agent Structure

Welcome back to our comprehensive AI agent tutorial series! In Part 1, we set up our development environment. Now we'll create the foundational structure for your AI agent, including the core agent class, conversation handling, and testing interface.
What You'll Build in This Tutorial
By the end of this tutorial, you'll have:
- ✅ A structured AI agent class with proper initialization
- ✅ Basic conversation handling with OpenAI's API
- ✅ Comprehensive logging and error handling
- ✅ A command-line interface for testing your agent
- ✅ Configuration management system
- ✅ Unit tests for your agent functionality
Estimated Time: 25-30 minutes
Step 1: Creating the Configuration System
First, let's create a robust configuration system to manage our agent's settings.
Create Configuration Module
Create `src/utils/config.py`:
"""
Configuration management for AI Agent
"""
import os
from typing import Optional, Dict, Any
from dataclasses import dataclass
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
@dataclass
class AgentConfig:
"""Configuration class for AI Agent"""
# OpenAI Configuration
openai_api_key: str
openai_model: str = "gpt-4"
max_tokens: int = 1000
temperature: float = 0.7
# Agent Configuration
agent_name: str = "AI Assistant"
agent_description: str = "A helpful AI agent"
system_prompt: str = "You are a helpful AI assistant."
# Logging Configuration
log_level: str = "INFO"
log_format: str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
# Development Settings
debug: bool = False
@classmethod
def from_env(cls) -> 'AgentConfig':
"""Create configuration from environment variables"""
# Required environment variables
openai_api_key = os.getenv('OPENAI_API_KEY')
if not openai_api_key:
raise ValueError("OPENAI_API_KEY environment variable is required")
return cls(
# OpenAI settings
openai_api_key=openai_api_key,
openai_model=os.getenv('OPENAI_MODEL', 'gpt-4'),
max_tokens=int(os.getenv('MAX_TOKENS', '1000')),
temperature=float(os.getenv('TEMPERATURE', '0.7')),
# Agent settings
agent_name=os.getenv('AGENT_NAME', 'AI Assistant'),
agent_description=os.getenv('AGENT_DESCRIPTION', 'A helpful AI agent'),
system_prompt=os.getenv('SYSTEM_PROMPT', 'You are a helpful AI assistant.'),
# Logging settings
log_level=os.getenv('LOG_LEVEL', 'INFO'),
# Development settings
debug=os.getenv('DEBUG', 'false').lower() == 'true'
)
def validate(self) -> None:
"""Validate configuration values"""
if not self.openai_api_key.startswith('sk-'):
raise ValueError("Invalid OpenAI API key format")
if self.max_tokens <= 0:
raise ValueError("max_tokens must be positive")
if not 0 <= self.temperature <= 2:
raise ValueError("temperature must be between 0 and 2")
if self.log_level not in ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']:
raise ValueError("Invalid log level")
def get_config() -> AgentConfig:
"""Get validated configuration instance"""
config = AgentConfig.from_env()
config.validate()
return config
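Before wiring this into the agent, you can give the configuration a quick sanity check from a Python shell. This is a minimal sketch, assuming you run it from the project root with `OPENAI_API_KEY` available in your environment or in the `.env` file described next:

```python
# Quick sanity check for the configuration module (run from the project root).
# Assumes OPENAI_API_KEY is set in your environment or .env file.
from src.utils.config import get_config

config = get_config()  # raises ValueError if the key is missing or malformed
print(config.agent_name, config.openai_model)
print(f"temperature={config.temperature}, max_tokens={config.max_tokens}")
```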
Update Environment Variables
Update your `.env` file with additional configuration:
```env
# OpenAI Configuration
OPENAI_API_KEY=your_openai_api_key_here
OPENAI_MODEL=gpt-4
MAX_TOKENS=1000
TEMPERATURE=0.7

# Agent Configuration
AGENT_NAME=MyFirstAgent
AGENT_DESCRIPTION=A helpful AI agent built from scratch
SYSTEM_PROMPT=You are a helpful AI assistant that can engage in conversations and help users with various tasks. Be friendly, informative, and concise in your responses.

# Development Settings
DEBUG=true
LOG_LEVEL=INFO
```
Step 2: Setting Up Logging
Create a comprehensive logging system for debugging and monitoring.
Create Logging Module
Create `src/utils/logger.py`:
"""
Logging configuration for AI Agent
"""
import logging
import sys
from typing import Optional
from pathlib import Path
from rich.logging import RichHandler
from rich.console import Console
def setup_logger(
name: str,
level: str = "INFO",
log_file: Optional[str] = None,
use_rich: bool = True
) -> logging.Logger:
"""
Set up a logger with optional file output and rich formatting
Args:
name: Logger name
level: Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
log_file: Optional file path for log output
use_rich: Whether to use rich formatting for console output
Returns:
Configured logger instance
"""
logger = logging.getLogger(name)
logger.setLevel(getattr(logging, level.upper()))
# Clear existing handlers
logger.handlers.clear()
# Console handler
if use_rich:
console = Console()
console_handler = RichHandler(
console=console,
show_time=True,
show_path=True,
markup=True
)
console_format = "%(message)s"
else:
console_handler = logging.StreamHandler(sys.stdout)
console_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
console_handler.setLevel(getattr(logging, level.upper()))
console_formatter = logging.Formatter(console_format)
console_handler.setFormatter(console_formatter)
logger.addHandler(console_handler)
# File handler (optional)
if log_file:
log_path = Path(log_file)
log_path.parent.mkdir(parents=True, exist_ok=True)
file_handler = logging.FileHandler(log_file)
file_handler.setLevel(logging.DEBUG) # Always log everything to file
file_format = "%(asctime)s - %(name)s - %(levelname)s - %(funcName)s:%(lineno)d - %(message)s"
file_formatter = logging.Formatter(file_format)
file_handler.setFormatter(file_formatter)
logger.addHandler(file_handler)
return logger
class AgentLogger:
"""Centralized logger for the AI Agent"""
_instance = None
_logger = None
def __new__(cls):
if cls._instance is None:
cls._instance = super().__new__(cls)
return cls._instance
def __init__(self):
if self._logger is None:
self._logger = setup_logger(
name="AIAgent",
level="INFO",
log_file="logs/agent.log",
use_rich=True
)
@property
def logger(self) -> logging.Logger:
return self._logger
def set_level(self, level: str):
"""Change logging level"""
self._logger.setLevel(getattr(logging, level.upper()))
for handler in self._logger.handlers:
if isinstance(handler, logging.StreamHandler):
handler.setLevel(getattr(logging, level.upper()))
# Convenience function
def get_logger() -> logging.Logger:
"""Get the agent logger instance"""
return AgentLogger().logger
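As a quick check, you can grab the shared logger from anywhere in the project and emit a few records. A minimal sketch, assuming the defaults above (Rich console output plus `logs/agent.log`):

```python
# Minimal usage sketch for the logging module above
from src.utils.logger import get_logger, AgentLogger

logger = get_logger()
logger.info("Agent starting up")      # rendered by RichHandler, also written to logs/agent.log
AgentLogger().set_level("DEBUG")      # raise console verbosity at runtime
logger.debug("Now visible at DEBUG level")
```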
Step 3: Creating the Core Agent Class
Now let's create the main AI agent class that will handle conversations and interactions.
Create Base Agent Class
Create `src/agents/base_agent.py`:
"""
Base AI Agent implementation
"""
import asyncio
from typing import List, Dict, Any, Optional, AsyncGenerator
from dataclasses import dataclass
from datetime import datetime
import uuid
from openai import OpenAI, AsyncOpenAI
from openai.types.chat import ChatCompletion
from ..utils.config import AgentConfig
from ..utils.logger import get_logger
@dataclass
class Message:
"""Represents a conversation message"""
role: str # 'user', 'assistant', 'system'
content: str
timestamp: datetime
message_id: str
metadata: Optional[Dict[str, Any]] = None
def to_dict(self) -> Dict[str, Any]:
"""Convert message to dictionary format"""
return {
"role": self.role,
"content": self.content,
"timestamp": self.timestamp.isoformat(),
"message_id": self.message_id,
"metadata": self.metadata or {}
}
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> 'Message':
"""Create message from dictionary"""
return cls(
role=data["role"],
content=data["content"],
timestamp=datetime.fromisoformat(data["timestamp"]),
message_id=data["message_id"],
metadata=data.get("metadata")
)
@dataclass
class ConversationContext:
"""Manages conversation state and history"""
conversation_id: str
messages: List[Message]
created_at: datetime
updated_at: datetime
metadata: Dict[str, Any]
def add_message(self, role: str, content: str, metadata: Optional[Dict[str, Any]] = None) -> Message:
"""Add a new message to the conversation"""
message = Message(
role=role,
content=content,
timestamp=datetime.now(),
message_id=str(uuid.uuid4()),
metadata=metadata
)
self.messages.append(message)
self.updated_at = datetime.now()
return message
def get_openai_messages(self) -> List[Dict[str, str]]:
"""Convert messages to OpenAI API format"""
return [{"role": msg.role, "content": msg.content} for msg in self.messages]
def get_recent_messages(self, limit: int = 10) -> List[Message]:
"""Get the most recent messages"""
return self.messages[-limit:] if len(self.messages) > limit else self.messages
class BaseAgent:
"""
Base AI Agent class that handles conversations with OpenAI's API
"""
def __init__(self, config: AgentConfig):
"""
Initialize the AI Agent
Args:
config: Agent configuration object
"""
self.config = config
self.logger = get_logger()
# Initialize OpenAI clients
self.client = OpenAI(api_key=config.openai_api_key)
self.async_client = AsyncOpenAI(api_key=config.openai_api_key)
# Agent state
self.conversations: Dict[str, ConversationContext] = {}
self.is_initialized = False
self.logger.info(f"Initialized {config.agent_name}")
def initialize(self) -> None:
"""Initialize the agent (can be overridden by subclasses)"""
if self.is_initialized:
return
self.logger.info("Initializing agent...")
# Test API connection
try:
response = self.client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Hello"}],
max_tokens=10
)
self.logger.info("✅ OpenAI API connection successful")
except Exception as e:
self.logger.error(f"❌ Failed to connect to OpenAI API: {e}")
raise
self.is_initialized = True
self.logger.info("Agent initialization complete")
def create_conversation(self, conversation_id: Optional[str] = None) -> str:
"""
Create a new conversation context
Args:
conversation_id: Optional custom conversation ID
Returns:
The conversation ID
"""
if conversation_id is None:
conversation_id = str(uuid.uuid4())
context = ConversationContext(
conversation_id=conversation_id,
messages=[],
created_at=datetime.now(),
updated_at=datetime.now(),
metadata={}
)
# Add system message
context.add_message("system", self.config.system_prompt)
self.conversations[conversation_id] = context
self.logger.info(f"Created conversation: {conversation_id}")
return conversation_id
def get_conversation(self, conversation_id: str) -> Optional[ConversationContext]:
"""Get conversation context by ID"""
return self.conversations.get(conversation_id)
def chat(self, message: str, conversation_id: Optional[str] = None) -> str:
"""
Send a message to the agent and get a response
Args:
message: User message
conversation_id: Optional conversation ID (creates new if None)
Returns:
Agent's response
"""
if not self.is_initialized:
self.initialize()
# Create or get conversation
if conversation_id is None:
conversation_id = self.create_conversation()
context = self.get_conversation(conversation_id)
if context is None:
raise ValueError(f"Conversation {conversation_id} not found")
# Add user message
context.add_message("user", message)
try:
# Get response from OpenAI
response = self._get_completion(context)
# Add assistant response
context.add_message("assistant", response)
self.logger.info(f"Conversation {conversation_id}: User -> Agent")
return response
except Exception as e:
self.logger.error(f"Error in chat: {e}")
error_response = "I apologize, but I encountered an error processing your request. Please try again."
context.add_message("assistant", error_response)
return error_response
async def chat_async(self, message: str, conversation_id: Optional[str] = None) -> str:
"""
Async version of chat method
Args:
message: User message
conversation_id: Optional conversation ID
Returns:
Agent's response
"""
if not self.is_initialized:
self.initialize()
# Create or get conversation
if conversation_id is None:
conversation_id = self.create_conversation()
context = self.get_conversation(conversation_id)
if context is None:
raise ValueError(f"Conversation {conversation_id} not found")
# Add user message
context.add_message("user", message)
try:
# Get response from OpenAI
response = await self._get_completion_async(context)
# Add assistant response
context.add_message("assistant", response)
self.logger.info(f"Conversation {conversation_id}: User -> Agent (async)")
return response
except Exception as e:
self.logger.error(f"Error in async chat: {e}")
error_response = "I apologize, but I encountered an error processing your request. Please try again."
context.add_message("assistant", error_response)
return error_response
def _get_completion(self, context: ConversationContext) -> str:
"""
Get completion from OpenAI API
Args:
context: Conversation context
Returns:
Generated response
"""
messages = context.get_openai_messages()
self.logger.debug(f"Sending {len(messages)} messages to OpenAI")
response = self.client.chat.completions.create(
model=self.config.openai_model,
messages=messages,
max_tokens=self.config.max_tokens,
temperature=self.config.temperature
)
return response.choices[0].message.content.strip()
async def _get_completion_async(self, context: ConversationContext) -> str:
"""
Async version of _get_completion
Args:
context: Conversation context
Returns:
Generated response
"""
messages = context.get_openai_messages()
self.logger.debug(f"Sending {len(messages)} messages to OpenAI (async)")
response = await self.async_client.chat.completions.create(
model=self.config.openai_model,
messages=messages,
max_tokens=self.config.max_tokens,
temperature=self.config.temperature
)
return response.choices[0].message.content.strip()
def get_conversation_history(self, conversation_id: str) -> List[Dict[str, Any]]:
"""
Get conversation history as a list of dictionaries
Args:
conversation_id: Conversation ID
Returns:
List of message dictionaries
"""
context = self.get_conversation(conversation_id)
if context is None:
return []
return [msg.to_dict() for msg in context.messages if msg.role != "system"]
def clear_conversation(self, conversation_id: str) -> bool:
"""
Clear a conversation
Args:
conversation_id: Conversation ID to clear
Returns:
True if conversation was found and cleared
"""
if conversation_id in self.conversations:
del self.conversations[conversation_id]
self.logger.info(f"Cleared conversation: {conversation_id}")
return True
return False
def get_stats(self) -> Dict[str, Any]:
"""Get agent statistics"""
total_conversations = len(self.conversations)
total_messages = sum(len(conv.messages) for conv in self.conversations.values())
return {
"agent_name": self.config.agent_name,
"total_conversations": total_conversations,
"total_messages": total_messages,
"model": self.config.openai_model,
"is_initialized": self.is_initialized
}
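If you want to exercise the agent outside the CLI we build next, a short script like the following works. It's a minimal sketch, assuming a valid API key in `.env` and that you run it from the project root (note that `initialize()` and `chat()` make real API calls):

```python
# Minimal programmatic usage sketch for BaseAgent (assumes a valid key in .env)
from src.utils.config import get_config
from src.agents.base_agent import BaseAgent

agent = BaseAgent(get_config())
agent.initialize()  # verifies API connectivity with a small test request

conv_id = agent.create_conversation()
print(agent.chat("Give me one fun fact about Python.", conv_id))
print(agent.get_stats())  # total_conversations, total_messages, model, ...
```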
Step 4: Creating a Command-Line Interface
Let's create a simple CLI to test our agent interactively.
Create CLI Module
Create `src/cli.py`:
"""
Command-line interface for AI Agent
"""
import sys
import asyncio
from typing import Optional
import argparse
from rich.console import Console
from rich.panel import Panel
from rich.prompt import Prompt
from rich.table import Table
from rich.markdown import Markdown
from .agents.base_agent import BaseAgent
from .utils.config import get_config
from .utils.logger import get_logger, AgentLogger
class AgentCLI:
"""Command-line interface for the AI Agent"""
def __init__(self):
self.console = Console()
self.config = get_config()
self.agent = BaseAgent(self.config)
self.logger = get_logger()
self.current_conversation_id: Optional[str] = None
def display_welcome(self):
"""Display welcome message"""
welcome_text = f"""
# Welcome to {self.config.agent_name}!
{self.config.agent_description}
**Available Commands:**
- `chat <message>` - Send a message to the agent
- `history` - View conversation history
- `new` - Start a new conversation
- `stats` - Show agent statistics
- `help` - Show this help message
- `quit` or `exit` - Exit the application
**Quick Start:**
Just type your message and press Enter to start chatting!
"""
self.console.print(Panel(
Markdown(welcome_text),
title="AI Agent CLI",
border_style="blue"
))
def display_stats(self):
"""Display agent statistics"""
stats = self.agent.get_stats()
table = Table(title="Agent Statistics")
table.add_column("Metric", style="cyan")
table.add_column("Value", style="green")
for key, value in stats.items():
table.add_row(key.replace("_", " ").title(), str(value))
self.console.print(table)
def display_history(self):
"""Display conversation history"""
if not self.current_conversation_id:
self.console.print("[yellow]No active conversation[/yellow]")
return
history = self.agent.get_conversation_history(self.current_conversation_id)
if not history:
self.console.print("[yellow]No messages in current conversation[/yellow]")
return
self.console.print(f"\n[bold]Conversation History ({self.current_conversation_id[:8]}...)[/bold]")
for msg in history:
role = msg["role"]
content = msg["content"]
timestamp = msg["timestamp"]
if role == "user":
self.console.print(f"\n[bold blue]You ({timestamp}):[/bold blue]")
self.console.print(content)
elif role == "assistant":
self.console.print(f"\n[bold green]Agent ({timestamp}):[/bold green]")
self.console.print(content)
def process_command(self, user_input: str) -> bool:
"""
Process user command
Args:
user_input: User input string
Returns:
False if should exit, True otherwise
"""
user_input = user_input.strip()
if not user_input:
return True
# Handle exit commands
if user_input.lower() in ['quit', 'exit', 'q']:
return False
# Handle special commands
if user_input.lower() == 'help':
self.display_welcome()
return True
if user_input.lower() == 'stats':
self.display_stats()
return True
if user_input.lower() == 'history':
self.display_history()
return True
if user_input.lower() == 'new':
self.current_conversation_id = None
self.console.print("[green]Started new conversation[/green]")
return True
# Handle chat command or direct message
if user_input.lower().startswith('chat '):
message = user_input[5:] # Remove 'chat ' prefix
else:
message = user_input
# Send message to agent
try:
with self.console.status("[bold green]Agent is thinking..."):
response = self.agent.chat(message, self.current_conversation_id)
# Update current conversation ID if it was None
if self.current_conversation_id is None:
# Get the most recent conversation ID
if self.agent.conversations:
self.current_conversation_id = list(self.agent.conversations.keys())[-1]
# Display response
self.console.print(f"\n[bold green]{self.config.agent_name}:[/bold green]")
self.console.print(response)
self.console.print()
except Exception as e:
self.console.print(f"[red]Error: {e}[/red]")
self.logger.error(f"CLI error: {e}")
return True
def run(self):
"""Run the CLI interface"""
try:
# Initialize agent
self.agent.initialize()
# Display welcome message
self.display_welcome()
# Main interaction loop
while True:
try:
user_input = Prompt.ask("\n[bold cyan]You[/bold cyan]")
if not self.process_command(user_input):
break
except KeyboardInterrupt:
self.console.print("\n[yellow]Goodbye![/yellow]")
break
except EOFError:
break
except Exception as e:
self.console.print(f"[red]Fatal error: {e}[/red]")
self.logger.error(f"Fatal CLI error: {e}")
sys.exit(1)
def main():
"""Main entry point for CLI"""
parser = argparse.ArgumentParser(description="AI Agent Command Line Interface")
parser.add_argument(
"--debug",
action="store_true",
help="Enable debug logging"
)
parser.add_argument(
"--log-level",
choices=["DEBUG", "INFO", "WARNING", "ERROR"],
default="INFO",
help="Set logging level"
)
args = parser.parse_args()
# Set logging level
if args.debug:
AgentLogger().set_level("DEBUG")
else:
AgentLogger().set_level(args.log_level)
# Run CLI
cli = AgentCLI()
cli.run()
if __name__ == "__main__":
main()
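The CLI above only exercises the synchronous `chat()` path. If you want to try `chat_async()` as well, a quick standalone script could look like this; it is a sketch only (not part of the CLI), and it assumes a valid key in `.env` and that you run it from the project root:

```python
# Quick sketch of the async path; not used by the CLI, shown for completeness
import asyncio

from src.utils.config import get_config
from src.agents.base_agent import BaseAgent


async def demo() -> None:
    agent = BaseAgent(get_config())
    conv_id = agent.create_conversation()
    # chat_async awaits the AsyncOpenAI client under the hood
    reply = await agent.chat_async("Summarize what an AI agent is in one sentence.", conv_id)
    print(reply)


if __name__ == "__main__":
    asyncio.run(demo())
```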
Step 5: Creating Unit Tests
Let's create comprehensive tests for our agent functionality.
Create Test Configuration
Create `tests/conftest.py`:
"""
Test configuration and fixtures
"""
import pytest
import os
from unittest.mock import Mock, patch
from src.utils.config import AgentConfig
from src.agents.base_agent import BaseAgent
@pytest.fixture
def mock_config():
"""Mock configuration for testing"""
return AgentConfig(
openai_api_key="sk-test-key-123",
openai_model="gpt-3.5-turbo",
max_tokens=100,
temperature=0.7,
agent_name="Test Agent",
agent_description="A test agent",
system_prompt="You are a test assistant.",
log_level="DEBUG",
debug=True
)
@pytest.fixture
def mock_openai_response():
"""Mock OpenAI API response"""
mock_response = Mock()
mock_response.choices = [Mock()]
mock_response.choices[0].message.content = "This is a test response."
return mock_response
@pytest.fixture
def agent(mock_config):
"""Create agent instance for testing"""
with patch('src.agents.base_agent.OpenAI'), \
patch('src.agents.base_agent.AsyncOpenAI'):
return BaseAgent(mock_config)
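To confirm the fixtures wire up correctly before writing the real tests, you could drop a trivial check like this into `tests/` (the file name is hypothetical and not part of the tutorial's test suite):

```python
# tests/test_fixtures.py (hypothetical) - verifies the conftest fixtures work
def test_agent_fixture_uses_mock_config(agent, mock_config):
    # The agent fixture patches the OpenAI clients, so no real API calls are made
    assert agent.config is mock_config
    assert agent.get_stats()["agent_name"] == "Test Agent"
```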
Create Agent Tests
Create `tests/test_base_agent.py`:
"""
Tests for BaseAgent class
"""
import pytest
from unittest.mock import Mock, patch, MagicMock
from datetime import datetime
from src.agents.base_agent import BaseAgent, Message, ConversationContext
from src.utils.config import AgentConfig
class TestMessage:
"""Test Message class"""
def test_message_creation(self):
"""Test message creation"""
msg = Message(
role="user",
content="Hello",
timestamp=datetime.now(),
message_id="test-123"
)
assert msg.role == "user"
assert msg.content == "Hello"
assert msg.message_id == "test-123"
def test_message_to_dict(self):
"""Test message serialization"""
timestamp = datetime.now()
msg = Message(
role="user",
content="Hello",
timestamp=timestamp,
message_id="test-123",
metadata={"test": "value"}
)
data = msg.to_dict()
assert data["role"] == "user"
assert data["content"] == "Hello"
assert data["message_id"] == "test-123"
assert data["metadata"]["test"] == "value"
def test_message_from_dict(self):
"""Test message deserialization"""
timestamp = datetime.now()
data = {
"role": "assistant",
"content": "Hi there!",
"timestamp": timestamp.isoformat(),
"message_id": "test-456",
"metadata": {"test": "value"}
}
msg = Message.from_dict(data)
assert msg.role == "assistant"
assert msg.content == "Hi there!"
assert msg.message_id == "test-456"
assert msg.metadata["test"] == "value"
class TestConversationContext:
"""Test ConversationContext class"""
def test_add_message(self):
"""Test adding messages to conversation"""
context = ConversationContext(
conversation_id="test-conv",
messages=[],
created_at=datetime.now(),
updated_at=datetime.now(),
metadata={}
)
msg = context.add_message("user", "Hello")
assert len(context.messages) == 1
assert msg.role == "user"
assert msg.content == "Hello"
assert msg.message_id is not None
def test_get_openai_messages(self):
"""Test OpenAI message format conversion"""
context = ConversationContext(
conversation_id="test-conv",
messages=[],
created_at=datetime.now(),
updated_at=datetime.now(),
metadata={}
)
context.add_message("system", "You are helpful")
context.add_message("user", "Hello")
context.add_message("assistant", "Hi there!")
openai_msgs = context.get_openai_messages()
assert len(openai_msgs) == 3
assert openai_msgs[0] == {"role": "system", "content": "You are helpful"}
assert openai_msgs[1] == {"role": "user", "content": "Hello"}
assert openai_msgs[2] == {"role": "assistant", "content": "Hi there!"}
class TestBaseAgent:
"""Test BaseAgent class"""
@patch('src.agents.base_agent.OpenAI')
@patch('src.agents.base_agent.AsyncOpenAI')
def test_agent_initialization(self, mock_async_openai, mock_openai, mock_config):
"""Test agent initialization"""
agent = BaseAgent(mock_config)
assert agent.config == mock_config
assert not agent.is_initialized
assert len(agent.conversations) == 0
@patch('src.agents.base_agent.OpenAI')
@patch('src.agents.base_agent.AsyncOpenAI')
def test_create_conversation(self, mock_async_openai, mock_openai, mock_config):
"""Test conversation creation"""
agent = BaseAgent(mock_config)
conv_id = agent.create_conversation()
assert conv_id in agent.conversations
context = agent.get_conversation(conv_id)
assert context is not None
assert len(context.messages) == 1 # System message
assert context.messages[0].role == "system"
@patch('src.agents.base_agent.OpenAI')
@patch('src.agents.base_agent.AsyncOpenAI')
def test_chat_functionality(self, mock_async_openai, mock_openai, mock_config, mock_openai_response):
"""Test basic chat functionality"""
# Setup mocks
mock_client = Mock()
mock_client.chat.completions.create.return_value = mock_openai_response
mock_openai.return_value = mock_client
agent = BaseAgent(mock_config)
# Test chat
response = agent.chat("Hello, how are you?")
assert response == "This is a test response."
assert len(agent.conversations) == 1
# Verify API was called
mock_client.chat.completions.create.assert_called_once()
@patch('src.agents.base_agent.OpenAI')
@patch('src.agents.base_agent.AsyncOpenAI')
def test_get_stats(self, mock_async_openai, mock_openai, mock_config):
"""Test agent statistics"""
agent = BaseAgent(mock_config)
stats = agent.get_stats()
assert stats["agent_name"] == mock_config.agent_name
assert stats["total_conversations"] == 0
assert stats["total_messages"] == 0
assert stats["model"] == mock_config.openai_model
assert not stats["is_initialized"]
Step 6: Running Your Agent
Now let's test everything we've built!
Create Main Entry Point
Create `main.py` in your project root:
```python
#!/usr/bin/env python3
"""
Main entry point for AI Agent
"""
import sys
from pathlib import Path

# Add src to Python path
sys.path.insert(0, str(Path(__file__).parent / "src"))

from src.cli import main

if __name__ == "__main__":
    main()
```
Test Your Agent
1. Run the verification script:

```bash
python test_setup.py
```

2. Run the unit tests:

```bash
pytest tests/ -v
```

3. Start the CLI interface:

```bash
python main.py
```

4. Test basic conversation:

```
You: Hello! How are you today?
Agent: Hello! I'm doing well, thank you for asking. I'm here and ready to help you with any questions or tasks you might have. How are you doing today?

You: What can you help me with?
Agent: I can help you with a wide variety of tasks! Here are some examples:
- Answering questions on various topics
- Helping with writing and editing
- Explaining concepts and providing information
- Assisting with problem-solving
- And much more!

What would you like to work on today?
```
Troubleshooting Common Issues
Issue: Import errors
Solution: Make sure your Python path includes the `src` directory:

```bash
export PYTHONPATH="${PYTHONPATH}:$(pwd)/src"
```
Issue: OpenAI API errors
Solution: Check your API key and credits:
```bash
# Test API connection
python -c "
from src.utils.config import get_config
from openai import OpenAI

config = get_config()
client = OpenAI(api_key=config.openai_api_key)
print('API connection successful!')
"
```
Issue: Rich formatting not working
Solution: Install rich with color support:
```bash
pip install rich[color]
```
What You've Accomplished
Congratulations! You've built a solid foundation for your AI agent:
- ✅ Configuration System - Flexible, environment-based configuration
- ✅ Logging Framework - Rich, structured logging with file output
- ✅ Core Agent Class - Conversation handling with OpenAI integration
- ✅ CLI Interface - Interactive testing environment
- ✅ Unit Tests - Comprehensive test coverage
- ✅ Error Handling - Robust error management and recovery
Key Features Implemented:
- Conversation Management - Multiple concurrent conversations
- Message History - Persistent conversation context
- Async Support - Both sync and async API calls
- Rich CLI - Beautiful command-line interface
- Comprehensive Logging - Debug and production logging
- Configuration Validation - Environment variable validation
- Unit Testing - Test-driven development approach
What's Next?
In Part 3: Adding Memory and Context Handling, you'll learn:
- Implementing persistent conversation memory
- Context window management for long conversations
- Message summarization and compression
- Conversation persistence to files/databases
- Advanced context retrieval strategies
Quick Reference Commands
```bash
# Run your agent
python main.py

# Run tests
pytest tests/ -v

# Run with debug logging
python main.py --debug

# Check agent stats
# (Use 'stats' command in CLI)
```
Additional Resources
- OpenAI Python Library: github.com/openai/openai-python
- Rich Documentation: rich.readthedocs.io
- Pytest Documentation: docs.pytest.org
- Python Async Programming: docs.python.org/3/library/asyncio.html
Ready to add memory and context handling to your agent? Continue to Part 3: Adding Memory and Context Handling to make your agent even smarter!
Tutorial Navigation
- ← Part 1: Development Environment Setup
- Part 2: Creating the Basic Agent Structure (Current)
- Part 3: Adding Memory and Context Handling →
This tutorial is part of our comprehensive AI Agent Development series. If you found this helpful, consider subscribing to our newsletter for more in-depth tutorials and AI development insights.