Dec 2024
https://www.deeplearning.ai/short-courses/ai-agentic-design-patterns-with-autogen
# Fetch the OpenAI API key via the course helper; autogen picks it up
# from the environment when making LLM calls.
from utils import get_openai_api_key
OPENAI_API_KEY = get_openai_api_key()
# Minimal LLM configuration shared by the agents in this lesson.
llm_config = {"model": "gpt-3.5-turbo"}
from autogen import ConversableAgent
# A standalone agent that never prompts for human input.
agent = ConversableAgent(
name="chatbot",
llm_config=llm_config,
human_input_mode="NEVER",
)
reply = agent.generate_reply(
messages=[{"content": "Tell me a joke.", "role": "user"}]
)
print(reply)
# NOTE(review): generate_reply() is stateless -- the agent kept no memory of
# the first exchange, so this call demonstrates that it cannot actually
# "repeat" the earlier joke.
reply = agent.generate_reply(
messages=[{"content": "Repeat the joke.", "role": "user"}]
)
print(reply)
Setting up a conversation between two agents, Cathy and Joe, where the memory of their interactions is retained.
# Two comedian agents; initiate_chat keeps a shared history, so each turn
# builds on the previous jokes.
cathy = ConversableAgent(
name="cathy",
system_message="Your name is Cathy and you are a stand-up comedian.",
llm_config=llm_config,
human_input_mode="NEVER",
)
joe = ConversableAgent(
name="joe",
system_message="Your name is Joe and you are a stand-up comedian. Start the next joke from the punchline of the previous joke.",
llm_config=llm_config,
human_input_mode="NEVER",
)
# Joe opens the conversation; max_turns caps it at two rounds.
chat_result = joe.initiate_chat(
recipient=cathy,
message="I'm Joe. Cathy, let's keep the jokes rolling.",
max_turns=2,
)
import pprint
# Inspect the transcript, token cost, and the default summary.
pprint.pprint(chat_result.chat_history)
pprint.pprint(chat_result.cost)
pprint.pprint(chat_result.summary)
# Re-run, this time asking an LLM to produce the summary
# ("reflection_with_llm") using a custom summary prompt.
chat_result = joe.initiate_chat(
cathy,
message="I'm Joe. Cathy, let's keep the jokes rolling.",
max_turns=2,
summary_method="reflection_with_llm",
summary_prompt="Summarize the conversation",
)
pprint.pprint(chat_result.summary)
A chat can be terminated by defining termination conditions.
# Comedian agents that stop the chat once a termination phrase appears in
# an incoming message.
cathy = ConversableAgent(
    name="cathy",
    system_message="Your name is Cathy and you are a stand-up comedian. When you're ready to end the conversation, say 'I gotta go'.",
    llm_config=llm_config,
    human_input_mode="NEVER",
    # FIX: a message's "content" can be absent or None (e.g. tool-call
    # messages); the original msg["content"] could raise KeyError/TypeError.
    # Coalesce to "" so the membership test is always safe.
    is_termination_msg=lambda msg: "I gotta go" in (msg.get("content") or ""),
)
joe = ConversableAgent(
    name="joe",
    system_message=
    "Your name is Joe and you are a stand-up comedian. When you're ready to end the conversation, say 'I gotta go'.",
    llm_config=llm_config,
    human_input_mode="NEVER",
    # Same guard as above; Joe also stops on "Goodbye".
    is_termination_msg=lambda msg: "I gotta go" in (msg.get("content") or "")
    or "Goodbye" in (msg.get("content") or ""),
)
Resume the conversation; the agents retain their chat history.
# Resume the prior conversation: send() continues from the retained
# chat history between cathy and joe.
cathy.send(message="What's last joke we talked about?", recipient=joe)
# Sequential-chats lesson: customer onboarding pipeline.
llm_config={"model": "gpt-3.5-turbo"}
from autogen import ConversableAgent
# Step 1 agent: collects only the customer's name and location.
onboarding_personal_information_agent = ConversableAgent(
name="Onboarding Personal Information Agent",
system_message='''You are a helpful customer onboarding agent,
you are here to help new customers get started with our product.
Your job is to gather customer's name and location.
Do not ask for other information. Return 'TERMINATE'
when you have gathered all the information.''',
llm_config=llm_config,
code_execution_config=False,
human_input_mode="NEVER",
)
# Step 2 agent: collects the customer's news-topic preferences.
onboarding_topic_preference_agent = ConversableAgent(
name="Onboarding Topic preference Agent",
system_message='''You are a helpful customer onboarding agent,
you are here to help new customers get started with our product.
Your job is to gather customer's preferences on news topics.
Do not ask for other information.
Return 'TERMINATE' when you have gathered all the information.''',
llm_config=llm_config,
code_execution_config=False,
human_input_mode="NEVER",
)
# Step 3 agent: entertains the customer using the gathered info.
customer_engagement_agent = ConversableAgent(
    name="Customer Engagement Agent",
    system_message='''You are a helpful customer service agent
here to provide fun for the customer based on the user's
personal information and topic preferences.
This could include fun facts, jokes, or interesting stories.
Make sure to make it engaging and fun!
Return 'TERMINATE' when you are done.''',
    llm_config=llm_config,
    code_execution_config=False,
    human_input_mode="NEVER",
    # FIX: msg.get("content") returns None when the key is missing (and
    # content can be None for tool-call messages), making .lower() raise
    # AttributeError. Coalesce to "" before lowercasing.
    is_termination_msg=lambda msg: "terminate" in (msg.get("content") or "").lower(),
)
# Stand-in for the human customer; always asks for real human input and
# never calls an LLM or executes code.
customer_proxy_agent = ConversableAgent(
    name="customer_proxy_agent",
    llm_config=False,
    code_execution_config=False,
    human_input_mode="ALWAYS",
    # FIX: guard against missing/None content before calling .lower(),
    # which previously raised AttributeError.
    is_termination_msg=lambda msg: "terminate" in (msg.get("content") or "").lower(),
)
Create tasks to facilitate the onboarding process
# Three sequential chats: gather personal info, gather topic preferences,
# then entertain the customer. Each chat's summary is carried into the
# next one as context.
chats = [
{
"sender": onboarding_personal_information_agent,
"recipient": customer_proxy_agent,
"message":
"Hello, I'm here to help you get started with our product."
"Could you tell me your name and location?",
"summary_method": "reflection_with_llm",
"summary_args": {
"summary_prompt" : "Return the customer information "
"into as JSON object only: "
"{'name': '', 'location': ''}",
},
"max_turns": 2,
"clear_history" : True
},
{
"sender": onboarding_topic_preference_agent,
"recipient": customer_proxy_agent,
"message":
"Great! Could you tell me what topics you are "
"interested in reading about?",
"summary_method": "reflection_with_llm",
"max_turns": 1,
# Keep the history so earlier answers remain in context.
"clear_history" : False
},
{
"sender": customer_proxy_agent,
"recipient": customer_engagement_agent,
"message": "Let's find something fun to read.",
"max_turns": 1,
"summary_method": "reflection_with_llm",
},
]
Start the onboarding process
from autogen import initiate_chats
# Run the three onboarding chats in sequence and collect their results.
chat_results = initiate_chats(chats)
# Print each chat's summary, then each chat's cost.
for chat_result in chat_results:
print(chat_result.summary)
print("\n")
for chat_result in chat_results:
print(chat_result.cost)
print("\n")
# Reflection lesson: a writer agent drafts a short blog post.
llm_config = {"model": "gpt-3.5-turbo"}
task = '''Write a concise but engaging blogpost about DeepLearning.AI. Make sure the blogpost is within 100 words.'''
import autogen
writer = autogen.AssistantAgent(
name="Writer",
system_message="You are a writer. You write engaging and concise "
"blogpost (with title) on given topics. You must polish your "
"writing based on the feedback you receive and give a refined "
"version. Only return your final work without additional comments.",
llm_config=llm_config,
)
# Baseline: a single draft with no feedback loop.
reply = writer.generate_reply(messages=[{"content": task, "role": "user"}])
print(reply)
Adding reflection
Create a critic agent to reflect on the work of the writer.
# Critic reviews the writer's drafts; the chat ends when an incoming
# message contains "TERMINATE".
critic = autogen.AssistantAgent(
name="Critic",
is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
llm_config=llm_config,
system_message="You are a critic. You review the work of "
"the writer and provide constructive "
"feedback to help improve the quality of the content.",
)
# Reflection loop: critic poses the task, writer drafts, critic reviews,
# writer refines (two turns).
res = critic.initiate_chat(
recipient=writer,
message=task,
max_turns=2,
summary_method="last_msg"
)
The critic can focus on SEO, legal, ethical and meta reviews.
Nested chat
# Specialist reviewers used in the nested-chat review queue below.
# SEO reviewer: search-engine optimization suggestions.
SEO_reviewer = autogen.AssistantAgent(
name="SEO Reviewer",
llm_config=llm_config,
system_message="You are an SEO reviewer, known for "
"your ability to optimize content for search engines, "
"ensuring that it ranks well and attracts organic traffic. "
"Make sure your suggestion is concise (within 3 bullet points), "
"concrete and to the point. "
"Begin the review by stating your role.",
)
# Legal reviewer: checks for legal-compliance issues.
legal_reviewer = autogen.AssistantAgent(
name="Legal Reviewer",
llm_config=llm_config,
system_message="You are a legal reviewer, known for "
"your ability to ensure that content is legally compliant "
"and free from any potential legal issues. "
"Make sure your suggestion is concise (within 3 bullet points), "
"concrete and to the point. "
"Begin the review by stating your role.",
)
# Ethics reviewer: checks for ethical issues.
ethics_reviewer = autogen.AssistantAgent(
name="Ethics Reviewer",
llm_config=llm_config,
system_message="You are an ethics reviewer, known for "
"your ability to ensure that content is ethically sound "
"and free from any potential ethical issues. "
"Make sure your suggestion is concise (within 3 bullet points), "
"concrete and to the point. "
"Begin the review by stating your role. ",
)
# Meta reviewer: aggregates the other reviewers' feedback into one final
# suggestion.
meta_reviewer = autogen.AssistantAgent(
    name="Meta Reviewer",
    llm_config=llm_config,
    # FIX: corrected the "aggragate" typo in the system prompt.
    system_message="You are a meta reviewer, you aggregate and review "
    "the work of other reviewers and give a final suggestion on the content.",
)
def reflection_message(recipient, messages, sender, config):
    """Build the review request sent to each nested reviewer.

    Pulls the writer's latest draft from the recipient/sender chat history
    and prefixes it with a short instruction. `messages` and `config` are
    part of the nested-chat callback signature but are unused here.
    """
    latest_draft = recipient.chat_messages_for_summary(sender)[-1]["content"]
    return "Review the following content.\n\n\n " + latest_draft
# Nested review queue: each specialist critiques the writer's latest draft
# (fetched by reflection_message) in a single turn; the meta reviewer
# aggregates everything at the end.
review_chats = [
    {
        "recipient": SEO_reviewer,
        "message": reflection_message,
        "summary_method": "reflection_with_llm",
        "summary_args": {"summary_prompt":
            "Return review into as JSON object only:"
            "{'Reviewer': '', 'Review': ''}. Here Reviewer should be your role",},
        "max_turns": 1},
    {
        "recipient": legal_reviewer, "message": reflection_message,
        "summary_method": "reflection_with_llm",
        "summary_args": {"summary_prompt":
            "Return review into as JSON object only:"
            "{'Reviewer': '', 'Review': ''}.",},
        "max_turns": 1},
    {"recipient": ethics_reviewer, "message": reflection_message,
        "summary_method": "reflection_with_llm",
        "summary_args": {"summary_prompt":
            "Return review into as JSON object only:"
            "{'reviewer': '', 'review': ''}",},
        "max_turns": 1},
    {"recipient": meta_reviewer,
        # FIX: corrected the "Aggregrate" typo in the prompt.
        "message": "Aggregate feedback from all reviewers and give final suggestions on the writing.",
        "max_turns": 1},
]
# Attach the review queue: whenever the writer messages the critic, the
# nested reviewer chats run first and their summaries inform the reply.
critic.register_nested_chats(
review_chats,
trigger=writer,
)
res = critic.initiate_chat(
recipient=writer,
message=task,
max_turns=2,
summary_method="last_msg"
)
print(res.summary)
# Tool-use lesson: a chess game between two LLM players.
llm_config = {"model": "gpt-4-turbo"}
import chess
import chess.svg
from typing_extensions import Annotated
# Global game state shared by the tool functions below.
board = chess.Board()
# Flag set by make_move(); check_made_move() consumes it to end a turn.
made_move = False
def get_legal_moves() -> Annotated[str, "A list of legal moves in UCI format"]:
    """Describe the global board's legal moves as comma-separated UCI strings."""
    uci_moves = (str(legal) for legal in board.legal_moves)
    return "Possible moves are: " + ",".join(uci_moves)
def make_move(move: Annotated[str, "A move in UCI format."]) -> Annotated[str, "Result of the move."]:
    """Apply a UCI move to the global board, render it, and describe it.

    Args:
        move: The move in UCI notation (e.g. "e2e4").

    Returns:
        A sentence naming the piece and its source/destination squares.
    """
    parsed = chess.Move.from_uci(move)
    # FIX: push the parsed move directly; the original parsed the move
    # twice via from_uci() followed by push_uci(str(...)).
    board.push(parsed)
    global made_move
    made_move = True
    # Display the board. NOTE(review): display() is the IPython/notebook
    # builtin -- this function is intended to run inside a notebook.
    display(
        chess.svg.board(
            board,
            arrows=[(parsed.from_square, parsed.to_square)],
            fill={parsed.from_square: "gray"},
            size=200
        )
    )
    # Get the piece now on the destination square (the piece just moved).
    piece = board.piece_at(parsed.to_square)
    piece_symbol = piece.unicode_symbol()
    # FIX: unicode chess glyphs are caseless, so the original
    # piece_symbol.isupper() check was always False and white pieces were
    # never capitalized. Decide by piece colour instead.
    piece_name = (
        chess.piece_name(piece.piece_type).capitalize()
        if piece.color == chess.WHITE
        else chess.piece_name(piece.piece_type)
    )
    return f"Moved {piece_name} ({piece_symbol}) from "\
        f"{chess.SQUARE_NAMES[parsed.from_square]} to "\
        f"{chess.SQUARE_NAMES[parsed.to_square]}."
from autogen import ConversableAgent
# White player: told to list legal moves first, then make a move.
player_white = ConversableAgent(
name="Player White",
system_message="You are a chess player and you play as white. "
"First call get_legal_moves(), to get a list of legal moves. "
"Then call make_move(move) to make a move.",
llm_config=llm_config,
)
# Black player: same protocol, playing black.
player_black = ConversableAgent(
name="Player Black",
system_message="You are a chess player and you play as black. "
"First call get_legal_moves(), to get a list of legal moves. "
"Then call make_move(move) to make a move.",
llm_config=llm_config,
)
def check_made_move(msg):
    """Termination predicate for the board proxy.

    Returns True exactly once after make_move() has set the global
    made_move flag, resetting the flag so the next turn starts clean.
    The message argument is ignored.
    """
    global made_move
    moved, made_move = made_move, False
    return moved
# Non-LLM proxy that executes the tools; a nested turn ends once a move
# was made (check_made_move), otherwise it nudges the player to move.
board_proxy = ConversableAgent(
name="Board Proxy",
llm_config=False,
is_termination_msg=check_made_move,
default_auto_reply="Please make a move.",
human_input_mode="NEVER",
)
Register tool
from autogen import register_function
# Register both tools for each player (the caller that proposes tool
# calls), with the board proxy as the executor that actually runs them.
for caller in [player_white, player_black]:
register_function(
get_legal_moves,
caller=caller,
executor=board_proxy,
name="get_legal_moves",
description="Get legal moves.",
)
register_function(
make_move,
caller=caller,
executor=board_proxy,
name="make_move",
description="Call this tool to make a move.",
)
# Notebook inspection line: shows the tool schemas attached to the
# player's llm_config (no effect when run as a script).
player_black.llm_config["tools"]
Register the nested chat
# Each player's turn runs as a nested chat with the board proxy, which
# executes the tool calls and terminates once a move is made.
player_white.register_nested_chats(
trigger=player_black,
chat_queue=[
{
"sender": board_proxy,
"recipient": player_white,
"summary_method": "last_msg",
}
],
)
player_black.register_nested_chats(
trigger=player_white,
chat_queue=[
{
"sender": board_proxy,
"recipient": player_black,
"summary_method": "last_msg",
}
],
)
Start the game
# Fresh board; Black invites White to start, capped at two turns.
board = chess.Board()
chat_result = player_black.initiate_chat(
player_white,
message="Let's play chess! Your move.",
max_turns=2,
)
Add fun chitchat to the game
# Recreate the players with an extra instruction to chitchat after each
# move, making the game more entertaining.
player_white = ConversableAgent(
name="Player White",
system_message="You are a chess player and you play as white. "
"First call get_legal_moves(), to get a list of legal moves. "
"Then call make_move(move) to make a move. "
"After a move is made, chitchat to make the game fun.",
llm_config=llm_config,
)
player_black = ConversableAgent(
name="Player Black",
system_message="You are a chess player and you play as black. "
"First call get_legal_moves(), to get a list of legal moves. "
"Then call make_move(move) to make a move. "
"After a move is made, chitchat to make the game fun.",
llm_config=llm_config,
)
# Re-register the tools: the players were recreated above, so the earlier
# registrations no longer apply to these new agent objects.
for caller in [player_white, player_black]:
register_function(
get_legal_moves,
caller=caller,
executor=board_proxy,
name="get_legal_moves",
description="Get legal moves.",
)
register_function(
make_move,
caller=caller,
executor=board_proxy,
name="make_move",
description="Call this tool to make a move.",
)
# Nested chats as before, but silent=True hides the inner tool traffic so
# only the chitchat shows in the main conversation.
player_white.register_nested_chats(
trigger=player_black,
chat_queue=[
{
"sender": board_proxy,
"recipient": player_white,
"summary_method": "last_msg",
"silent": True,
}
],
)
player_black.register_nested_chats(
trigger=player_white,
chat_queue=[
{
"sender": board_proxy,
"recipient": player_black,
"summary_method": "last_msg",
"silent": True,
}
],
)
# Fresh board for the chitchat game.
board = chess.Board()
chat_result = player_black.initiate_chat(
player_white,
message="Let's play chess! Your move.",
max_turns=2,
)
# Code-execution lesson.
llm_config = {"model": "gpt-4-turbo"}
from autogen.coding import LocalCommandLineCodeExecutor
# Runs model-written code locally in ./coding with a 60-second timeout.
executor = LocalCommandLineCodeExecutor(
timeout=60,
work_dir="coding",
)
from autogen import ConversableAgent, AssistantAgent
# Executes code and relays results; a human approves each step.
code_executor_agent = ConversableAgent(
name="code_executor_agent",
llm_config=False,
code_execution_config={"executor": executor},
human_input_mode="ALWAYS",
default_auto_reply="Please continue. If everything is done, reply 'TERMINATE'.",
)
# Writes code; AssistantAgent ships with a coding-oriented default system
# message, printed below.
code_writer_agent = AssistantAgent(
name="code_writer_agent",
llm_config=llm_config,
code_execution_config=False,
human_input_mode="NEVER",
)
code_writer_agent_system_message = code_writer_agent.system_message
print(code_writer_agent_system_message)
# The bare string below is the captured output of the print above (the
# default AssistantAgent system message); it has no runtime effect.
"""
You are a helpful AI assistant.
Solve tasks using your coding and language skills.
In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute.
1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself.
2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly.
Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill.
When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user.
If you want the user to save the code in a file before executing it, put # filename: <filename> inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use 'print' function for the output when relevant. Check the execution result returned by the user.
If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.
When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible.
Reply "TERMINATE" in the end when everything is done.
"""
Ask the two agents to collaborate on a stock analysis task
import datetime
# Anchor the task to today's date so the model computes the right YTD range.
today = datetime.datetime.now().date()
# FIX: the Tesla ticker is TSLA, not "TLSA" (the later example already uses
# TSLA); also dropped a stray trailing empty-string literal.
message = f"Today is {today}. "\
"Create a plot showing stock gain YTD for NVDA and TSLA. "\
"Make sure the code is in markdown code block and save the figure"\
" to a file ytd_stock_gains.png."
chat_result = code_executor_agent.initiate_chat(
    code_writer_agent,
    message=message,
)
# Press enter to end chat
import os
from IPython.display import Image
# Render the saved figure (notebook-only; bare expression has no effect
# in a plain script).
Image(os.path.join("coding", "ytd_stock_gains.png"))
You could also have it call user-defined functions.
def get_stock_prices(stock_symbols, start_date, end_date):
    """Download daily closing prices for the given stock symbols.

    Args:
        stock_symbols (str or list): Ticker symbol(s) to fetch.
        start_date (str): Start date in 'YYYY-MM-DD' format.
        end_date (str): End date in 'YYYY-MM-DD' format.

    Returns:
        pandas.DataFrame: Closing prices indexed by date, one column per
        stock symbol.
    """
    import yfinance

    history = yfinance.download(stock_symbols, start=start_date, end=end_date)
    return history.get("Close")
def plot_stock_prices(stock_prices, filename):
    """Plot the stock prices and save the figure to a file.

    Args:
        stock_prices (pandas.DataFrame): Prices indexed by date, one
            column per stock symbol.
        filename (str): Path the figure is written to.
    """
    import matplotlib.pyplot as plt

    plt.figure(figsize=(10, 5))
    for column in stock_prices.columns:
        plt.plot(
            stock_prices.index, stock_prices[column], label=column
        )
    plt.title("Stock Prices")
    plt.xlabel("Date")
    plt.ylabel("Price")
    # FIX: each series sets a label, but the original never called
    # legend(), so the labels were never rendered.
    plt.legend()
    plt.grid(True)
    plt.savefig(filename)
executor = LocalCommandLineCodeExecutor(
timeout=60,
work_dir="coding",
functions=[get_stock_prices, plot_stock_prices],
)
code_writer_agent_system_message += executor.format_functions_for_prompt()
print(code_writer_agent_system_message)
'''
You are a helpful AI assistant.
Solve tasks using your coding and language skills.
In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute.
1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself.
2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly.
Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill.
When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user.
If you want the user to save the code in a file before executing it, put # filename: <filename> inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use 'print' function for the output when relevant. Check the execution result returned by the user.
If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.
When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible.
Reply "TERMINATE" in the end when everything is done.
You have access to the following user defined functions. They can be accessed from the module called `functions` by their function names.
For example, if there was a function called `foo` you could import it by writing `from functions import foo`
def get_stock_prices(stock_symbols, start_date, end_date):
"""Get the stock prices for the given stock symbols between
the start and end dates.
Args:
stock_symbols (str or list): The stock symbols to get the
prices for.
start_date (str): The start date in the format
'YYYY-MM-DD'.
end_date (str): The end date in the format 'YYYY-MM-DD'.
Returns:
pandas.DataFrame: The stock prices for the given stock
symbols indexed by date, with one column per stock
symbol.
"""
...
def plot_stock_prices(stock_prices, filename):
"""Plot the stock prices for the given stock symbols.
Args:
stock_prices (pandas.DataFrame): The stock prices for the
given stock symbols.
"""
'''
# Rebuild the writer with the extended system message that now documents
# the two user-defined functions.
code_writer_agent = ConversableAgent(
    name="code_writer_agent",
    system_message=code_writer_agent_system_message,
    llm_config=llm_config,
    code_execution_config=False,
    human_input_mode="NEVER",
)
code_executor_agent = ConversableAgent(
    name="code_executor_agent",
    llm_config=False,
    code_execution_config={"executor": executor},
    human_input_mode="ALWAYS",
    default_auto_reply=
    "Please continue. If everything is done, reply 'TERMINATE'.",
)
# FIX: the original implicit string concatenation produced
# "...{today}.Download ..." and "...createa plot..." -- the joined
# literals were missing separating spaces.
chat_result = code_executor_agent.initiate_chat(
    code_writer_agent,
    message=f"Today is {today}. "
    "Download the stock prices YTD for NVDA and TSLA and create "
    "a plot. Make sure the code is in markdown code block and "
    "save the figure to a file stock_prices_YTD_plot.png.",
)
# Group-chat lesson: write a blog post about Nvidia's recent stock
# performance, anchored to today's date.
llm_config={"model": "gpt-4-turbo"}
import datetime
task = "Write a blogpost about the stock price performance of "\
f"Nvidia in the past month. Today's date is {datetime.datetime.now().strftime('%Y-%m-%d')}."
Build a group chat
User_proxy or Admin: to allow the user to comment on the report and ask the writer to refine it.
Planner: to determine relevant information needed to complete the task.
Engineer: to write code using the defined plan by the planner.
Executor: to execute the code written by the engineer.
Writer: to write the report.
import autogen
# Admin/user proxy: a human provides the task and feedback.
user_proxy = autogen.ConversableAgent(
name="Admin",
system_message="Give the task, and send "
"instructions to writer to refine the blog post.",
code_execution_config=False,
llm_config=llm_config,
human_input_mode="ALWAYS",
)
# Planner: decides what info to retrieve (via Python code) and tracks
# progress. The description is what the group-chat manager reads when
# choosing the next speaker.
planner = autogen.ConversableAgent(
name="Planner",
system_message="Given a task, please determine "
"what information is needed to complete the task. "
"Please note that the information will all be retrieved using"
" Python code. Please only suggest information that can be "
"retrieved using Python code. "
"After each step is done by others, check the progress and "
"instruct the remaining steps. If a step fails, try to "
"workaround",
description="Planner. Given a task, determine what "
"information is needed to complete the task. "
"After each step is done by others, check the progress and "
"instruct the remaining steps",
llm_config=llm_config,
)
# Engineer: writes the code the planner asks for.
engineer = autogen.AssistantAgent(
name="Engineer",
llm_config=llm_config,
description="An engineer that writes code based on the plan "
"provided by the planner.",
)
Use an alternative method of code execution by providing a dict config - https://microsoft.github.io/autogen/0.2/docs/reference/agentchat/conversable_agent/
# Executor: no LLM; runs code found in the last 3 messages, locally
# (use_docker=False) inside ./coding.
executor = autogen.ConversableAgent(
name="Executor",
system_message="Execute the code written by the "
"engineer and report the result.",
human_input_mode="NEVER",
code_execution_config={
"last_n_messages": 3,
"work_dir": "coding",
"use_docker": False,
},
)
# Writer: produces the markdown blog post and refines it on admin feedback.
writer = autogen.ConversableAgent(
name="Writer",
llm_config=llm_config,
system_message="Writer."
"Please write blogs in markdown format (with relevant titles)"
" and put the content in pseudo ```md``` code block. "
"You take feedback from the admin and refine your blog.",
description="Writer."
"Write blogs based on the code execution results and take "
"feedback from the admin to refine the blog."
)
Put into group chat
# Free-form group chat: the manager's LLM picks the next speaker each
# round, up to 10 rounds.
groupchat = autogen.GroupChat(
agents=[user_proxy, engineer, writer, executor, planner],
messages=[],
max_round=10,
)
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)
Start the group chat
# Kick off the group chat with the blog-post task.
groupchat_result = user_proxy.initiate_chat(manager, message=task)
Add speaker order
# Rebuild the same team, then constrain who may speak after whom.
user_proxy = autogen.ConversableAgent(
name="Admin",
system_message="Give the task, and send "
"instructions to writer to refine the blog post.",
code_execution_config=False,
llm_config=llm_config,
human_input_mode="ALWAYS",
)
planner = autogen.ConversableAgent(
name="Planner",
system_message="Given a task, please determine "
"what information is needed to complete the task. "
"Please note that the information will all be retrieved using"
" Python code. Please only suggest information that can be "
"retrieved using Python code. "
"After each step is done by others, check the progress and "
"instruct the remaining steps. If a step fails, try to "
"workaround",
description="Given a task, determine what "
"information is needed to complete the task. "
"After each step is done by others, check the progress and "
"instruct the remaining steps",
llm_config=llm_config,
)
engineer = autogen.AssistantAgent(
name="Engineer",
llm_config=llm_config,
description="Write code based on the plan "
"provided by the planner.",
)
writer = autogen.ConversableAgent(
name="Writer",
llm_config=llm_config,
system_message="Writer. "
"Please write blogs in markdown format (with relevant titles)"
" and put the content in pseudo ```md``` code block. "
"You take feedback from the admin and refine your blog.",
description="After all the info is available, "
"write blogs based on the code execution results and take "
"feedback from the admin to refine the blog. ",
)
executor = autogen.ConversableAgent(
name="Executor",
description="Execute the code written by the "
"engineer and report the result.",
human_input_mode="NEVER",
code_execution_config={
"last_n_messages": 3,
"work_dir": "coding",
"use_docker": False,
},
)
groupchat = autogen.GroupChat(
agents=[user_proxy, engineer, writer, executor, planner],
messages=[],
max_round=10,
# Whitelist of legal speaker transitions ("allowed" semantics): each key
# may only be followed by the agents in its list.
allowed_or_disallowed_speaker_transitions={
user_proxy: [engineer, writer, executor, planner],
engineer: [user_proxy, executor],
writer: [user_proxy, planner],
executor: [user_proxy, engineer, planner],
planner: [user_proxy, engineer, writer],
},
speaker_transitions_type="allowed",
)
manager = autogen.GroupChatManager(
groupchat=groupchat, llm_config=llm_config
)
groupchat_result = user_proxy.initiate_chat(
manager,
message=task,
)