# Simple Assistant
This example demonstrates how to create a simple assistant that can perform calculations, tell the time, and provide random numbers.
## Basic Tools
First, let’s define some simple tools:
from brain.agents.tool import tool
import random
from datetime import datetime
from typing import Literal, Optional
@tool()
def calculate(operation: Literal["add", "subtract", "multiply", "divide"],
a: float,
b: float) -> float:
"""
Perform a basic calculation
Args:
operation: The math operation to perform (add, subtract, multiply, divide)
a: First number
b: Second number
Returns:
The result of the calculation
"""
match operation:
case "add":
return a + b
case "subtract":
return a - b
case "multiply":
return a * b
case "divide":
if b == 0:
raise ValueError("Cannot divide by zero")
return a / b
@tool()
def get_current_time(timezone: Optional[str] = None) -> str:
    """
    Get the current date and time.

    Args:
        timezone: Optional IANA timezone name (e.g. "UTC", "Europe/Paris").
            When omitted, the system's local time is used.

    Returns:
        The current date and time formatted as "YYYY-MM-DD HH:MM:SS".

    Raises:
        zoneinfo.ZoneInfoNotFoundError: If the timezone name is not recognized.
    """
    if timezone:
        # Imported lazily so the example's top-level imports stay unchanged.
        from zoneinfo import ZoneInfo
        now = datetime.now(ZoneInfo(timezone))
    else:
        now = datetime.now()
    return now.strftime("%Y-%m-%d %H:%M:%S")
@tool()
def get_random_number(min_value: int = 1, max_value: int = 100) -> int:
    """
    Generate a random number in the given range.

    Args:
        min_value: Minimum value (inclusive)
        max_value: Maximum value (inclusive)

    Returns:
        A random integer between min_value and max_value, inclusive.
    """
    # randrange's upper bound is exclusive, so add 1 to keep it inclusive;
    # randint(a, b) is defined as exactly this call.
    return random.randrange(min_value, max_value + 1)
## Creating the Assistant
Now, let’s create an assistant that uses these tools:
import asyncio
import os
from brain.agents.agent import Agent
from brain.agents.llm.openai import OpenAIBaseLLM
async def main():
    """Run an interactive console session with the tool-using assistant."""
    # Set up the model backend.
    llm = OpenAIBaseLLM(
        api_key=os.environ.get("OPENAI_API_KEY"),
        default_model="gpt-4o-mini",
    )

    # Wire the three tools into an agent.
    agent = Agent(
        llm=llm,
        tools=[calculate, get_current_time, get_random_number],
        instructions="""
    You are a helpful assistant that can perform calculations, tell the time,
    and generate random numbers. Use the available tools to help the user.
    """,
    )

    # Simple read-eval-print loop; 'exit' ends the session.
    print("Simple Assistant (type 'exit' to quit)")
    while True:
        user_input = input("\nYou: ")
        if user_input.lower() == "exit":
            break

        response = await agent.run(user_input)
        print(f"\nAssistant: {response}")


if __name__ == "__main__":
    asyncio.run(main())
## Streaming Responses
To make the assistant more responsive, we can add streaming support:
from brain.agents.callback import callback
@callback("message_stream.assistant")
async def stream_to_console(agent, event, stream):
    """Print assistant tokens to the console as they arrive."""
    print("\nAssistant: ", end="", flush=True)
    async for part in stream:
        # Only text-bearing events carry a `chunk` attribute.
        if hasattr(part, "chunk"):
            print(part.chunk, end="", flush=True)
# Update the agent creation to register the streaming callback.
assistant_tools = [calculate, get_current_time, get_random_number]
agent = Agent(
    llm=llm,
    tools=assistant_tools,
    instructions="""
    You are a helpful assistant that can perform calculations, tell the time,
    and generate random numbers. Use the available tools to help the user.
    """,
    callbacks=[stream_to_console],
)
# In the conversation loop, skip printing the response yourself —
# the callback already streams it to the console.
async def main():
    # ... (LLM and agent setup as before)
    while True:
        user_input = input("\nYou: ")
        if user_input.lower() == "exit":
            break

        # The callback streams the reply; just run the agent.
        await agent.run(user_input)
        print()  # Newline after the streamed response
## Complete Example
Here’s the complete example with streaming support:
import asyncio
import os
import random
from datetime import datetime
from typing import Literal, Optional
from brain.agents.agent import Agent
from brain.agents.callback import callback
from brain.agents.llm.openai import OpenAIBaseLLM
from brain.agents.tool import tool
# Define tools
@tool()
def calculate(operation: Literal["add", "subtract", "multiply", "divide"],
a: float,
b: float) -> float:
"""Perform a basic calculation"""
match operation:
case "add":
return a + b
case "subtract":
return a - b
case "multiply":
return a * b
case "divide":
if b == 0:
raise ValueError("Cannot divide by zero")
return a / b
@tool()
def get_current_time(timezone: Optional[str] = None) -> str:
    """Get the current date and time, optionally in an IANA timezone.

    Falls back to local time when no timezone is given; raises
    zoneinfo.ZoneInfoNotFoundError for an unrecognized timezone name.
    """
    if timezone:
        from zoneinfo import ZoneInfo  # lazy: only needed when a tz is given
        return datetime.now(ZoneInfo(timezone)).strftime("%Y-%m-%d %H:%M:%S")
    return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
@tool()
def get_random_number(min_value: int = 1, max_value: int = 100) -> int:
    """Generate a random integer in [min_value, max_value]."""
    # randrange excludes the upper bound, so add 1 to keep it inclusive.
    return random.randrange(min_value, max_value + 1)
# Streaming callback: echoes assistant tokens as they arrive.
@callback("message_stream.assistant")
async def stream_to_console(agent, event, stream):
    """Print assistant output to the console incrementally."""
    print("\nAssistant: ", end="", flush=True)
    async for part in stream:
        # Only text-bearing events carry a `chunk` attribute.
        if hasattr(part, "chunk"):
            print(part.chunk, end="", flush=True)
async def main():
    """Run the interactive assistant with streamed responses."""
    # Set up the model backend.
    llm = OpenAIBaseLLM(
        api_key=os.environ.get("OPENAI_API_KEY"),
        default_model="gpt-4o-mini",
    )

    # Agent with tools plus the console-streaming callback.
    agent = Agent(
        llm=llm,
        tools=[calculate, get_current_time, get_random_number],
        instructions="""
    You are a helpful assistant that can perform calculations, tell the time,
    and generate random numbers. Use the available tools to help the user.
    """,
        callbacks=[stream_to_console],
    )

    # Read-eval-print loop; 'exit' ends the session.
    print("Simple Assistant (type 'exit' to quit)")
    while True:
        user_input = input("\nYou: ")
        if user_input.lower() == "exit":
            break

        # The callback streams the reply as it is generated.
        await agent.run(user_input)
        print()  # Add a newline after the response


if __name__ == "__main__":
    asyncio.run(main())