from cerebrum.llm.communication import LLMQuery # Using LLMQuery as an example
Construct system instructions
Here's how to set up your agent's system instructions. Put this function inside your agent class:
def build_system_instruction(self):
    """Seed ``self.messages`` with the agent's system prompt.

    In "manual" workflow mode only the system prompt (built from the
    agent config's description) is appended; in "automatic" mode an
    additional user message is appended that instructs the LLM to emit
    a JSON-formatted plan using the tools in ``self.tool_info``.
    """
    # The config description is a list of string fragments.
    system_prompt = "".join(self.config["description"])

    # Planning instructions shown to the LLM in automatic mode.
    # (Implicit string concatenation; content is deliberately unchanged.)
    plan_prompt = (
        f"You are given the available tools from the tool list: {json.dumps(self.tool_info)} to help you solve problems. "
        "Generate a plan with comprehensive yet minimal steps to fulfill the task. "
        "The plan must follow the json format as below: "
        "["
        '{"action_type": "action_type_value", "action": "action_value","tool_use": [tool_name1, tool_name2,...]}'
        '{"action_type": "action_type_value", "action": "action_value", "tool_use": [tool_name1, tool_name2,...]}'
        "..."
        "]"
        "In each step of the planned plan, identify tools to use and recognize no tool is necessary. "
        "Followings are some plan examples. "
        "[" "["
        '{"action_type": "tool_use", "action": "gather information from arxiv. ", "tool_use": ["arxiv"]},'
        '{"action_type": "chat", "action": "write a summarization based on the gathered information. ", "tool_use": []}'
        "];"
        "["
        '{"action_type": "tool_use", "action": "gather information from arxiv. ", "tool_use": ["arxiv"]},'
        '{"action_type": "chat", "action": "understand the current methods and propose ideas that can improve ", "tool_use": []}'
        "]"
        "]"
    )

    # Only two modes are supported; fail fast on anything else.
    if self.workflow_mode != "manual":
        assert self.workflow_mode == "automatic"

    self.messages.append({"role": "system", "content": system_prompt})
    if self.workflow_mode == "automatic":
        self.messages.append({"role": "user", "content": plan_prompt})
Create Workflows
You can create a workflow for the agent to execute its task. Put this function inside your agent class.
Manual workflow example:
def manual_workflow(self):
    """Return the hand-written two-step plan: search arxiv, then respond."""
    search_step = {
        "action_type": "tool_use",
        "action": "Search for relevant papers",
        "tool_use": ["demo_author/arxiv"],
    }
    respond_step = {
        "action_type": "chat",
        "action": "Provide responses based on the user's query",
        "tool_use": [],
    }
    return [search_step, respond_step]
Implement the Run Method
Finally, implement the run method to execute your agent's workflow. Put this function inside your agent class.
def run(self):
    """Execute the agent's workflow end to end.

    Builds the system instruction, obtains a workflow (generated
    automatically or taken from ``manual_workflow``), then walks the
    workflow step by step, sending one LLM request per step.

    Returns:
        dict with keys "agent_name", "result", and "rounds". On an
        unexpected exception the same shape is returned with the error
        message in "result" (previously this returned an empty dict,
        silently discarding the error).
    """
    self.build_system_instruction()

    task_input = self.task_input
    self.messages.append({"role": "user", "content": task_input})

    if self.workflow_mode == "automatic":
        workflow = self.automatic_workflow()
        self.messages = self.messages[:1]  # clear long context
    else:
        assert self.workflow_mode == "manual"
        workflow = self.manual_workflow()

    self.messages.append(
        {
            "role": "user",
            "content": f"[Thinking]: The workflow generated for the problem is {json.dumps(workflow)}. Follow the workflow to solve the problem step by step. ",
        }
    )

    try:
        if workflow:
            final_result = ""
            for i, step in enumerate(workflow):
                action_type = step["action_type"]
                action = step["action"]
                tool_use = step["tool_use"]

                prompt = f"At step {i + 1}, you need to: {action}. "
                self.messages.append({"role": "user", "content": prompt})

                # Narrow the available tools to the ones this step declares;
                # None signals "no tools" to the LLM backend.
                selected_tools = self.pre_select_tools(tool_use) if tool_use else None

                response = self.send_request(
                    agent_name=self.agent_name,
                    query=LLMQuery(
                        messages=self.messages,
                        tools=selected_tools,
                        action_type=action_type,
                    ),
                )["response"]

                self.messages.append({"role": "assistant", "content": response.response_message})
                self.rounds += 1
                # Keep the latest assistant reply as the running result.
                final_result = self.messages[-1]["content"]

            return {
                "agent_name": self.agent_name,
                "result": final_result,
                "rounds": self.rounds,
            }
        else:
            return {
                "agent_name": self.agent_name,
                "result": "Failed to generate a valid workflow in the given times.",
                "rounds": self.rounds,
            }
    except Exception as e:
        # Surface the failure instead of swallowing it: the original code
        # returned {} here, which hid the error and broke the return shape.
        return {
            "agent_name": self.agent_name,
            "result": f"Execution failed with error: {e}",
            "rounds": self.rounds,
        }
Similar to developing new agents, developing tools also needs to follow a simple directory structure:
demo_author/
└── demo_tool/
    ├── entry.py # Contains your tool's main logic
└── config.json # Tool configuration and metadata
Setting up config.json
Your tool needs a configuration file that describes its properties. Here's an example of how to set it up:
{
"name": "demo_tool",
"description": [
"The arxiv tool that can be used to search for papers on arxiv"
],
"meta": {
"author": "demo_author",
"version": "1.0.6",
"license": "CC0"
},
"build": {
"entry": "tool.py",
"module": "DemoTool"
}
}
Create Tool Class
In entry.py, you'll need to implement a tool class, identified by the "module" field in config.json, with two essential methods:
get_tool_call_format: Defines how LLMs should interact with your tool
run: Contains your tool's main functionality
Here's an example:
class Arxiv:
    """Example tool: searches for papers and topics on arxiv."""

    def get_tool_call_format(self):
        """Describe this tool in the function-calling schema the LLM expects."""
        query_param = {
            "type": "string",
            "description": "Input query that describes what to search in arxiv",
        }
        parameters = {
            "type": "object",
            "properties": {"query": query_param},
            "required": ["query"],
        }
        return {
            "type": "function",
            "function": {
                "name": "demo_author/arxiv",
                "description": "Query articles or topics in arxiv",
                "parameters": parameters,
            },
        }

    def run(self, params: dict):
        """
        Main tool logic goes here.
        Args:
            params: Dictionary containing tool parameters
        Returns:
            Your tool's output
        """
        # Your code here
        result = do_something(params['param_name'])
        return result
Integration Tips
When integrating your tool into the agents you develop:
Use absolute paths to reference your tool in agent configurations
Example: /path/to/your/tools/example/your_tool instead of just author/tool_name