Frameworks
Framework integration for the Agentuity Python SDK
The Agentuity Python SDK provides a flexible and powerful way to integrate other agent frameworks with your Agent.
The following frameworks are currently supported:
- CrewAI
- LangChain
- LlamaIndex
- Pydantic AI
Using Frameworks with Agentuity
To use a framework with Agentuity, choose a framework template when creating a new project:
agentuity create
When you select one of the framework templates, the Agentuity CLI will install the necessary dependencies and create a new project with the framework already configured.
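Whichever template you pick, the generated Agent exposes the same run entry point, and the framework-specific code lives inside it. Here is a minimal sketch of that shared shape (the placeholder reply is just for illustration):

from agentuity import AgentRequest, AgentResponse, AgentContext


async def run(request: AgentRequest, response: AgentResponse, context: AgentContext):
    # Read the incoming payload as text, falling back to a default prompt
    prompt = await request.data.text() or "Hello"
    # Framework-specific logic (CrewAI, LangChain, LlamaIndex, Pydantic AI) goes here
    return response.text(f"You said: {prompt}")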
CrewAI
Example Agent using CrewAI:
from agentuity import AgentRequest, AgentResponse, AgentContext
from agents.Concierge.crew import Concierge


async def run(
    request: AgentRequest,
    response: AgentResponse,
    context: AgentContext,
):
    prompt = await request.data.text() or "Recommend dinner spots in Austin, TX"
    concierge = Concierge()
    crew = concierge.crew(prompt)
    result = crew.kickoff()
    return response.text(str(result))
Then for your Crew, you might have the following:
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task


@CrewBase
class Concierge:
    """Concierge crew"""

    agents_config = "config/agents.yaml"
    tasks_config = "config/tasks.yaml"

    @agent
    def intent_analyzer(self) -> Agent:
        return Agent(config=self.agents_config["intent_analyzer"], verbose=True)

    @agent
    def local_guide(self) -> Agent:
        return Agent(config=self.agents_config["local_guide"], verbose=True)

    @task
    def local_guide_task(self, query: str = "") -> Task:
        config = self.tasks_config["local_guide_task"].copy()
        config["description"] = config["description"].format(query=query)
        return Task(config=config)

    @crew
    def crew(self, query: str) -> Crew:
        return Crew(
            agents=[self.local_guide()],
            tasks=[self.local_guide_task(query)],
            process=Process.hierarchical,
            manager_agent=self.intent_analyzer(),
            verbose=True,
        )
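The agents_config and tasks_config paths point at the YAML files generated by the template; the @CrewBase decorator loads them so the @agent and @task methods can index into them. If you want to exercise the crew outside of Agentuity, a quick hypothetical standalone check appended to the module above might look like this:

# Hypothetical local smoke test for the Concierge crew defined above; assumes the
# template's config/agents.yaml and config/tasks.yaml exist alongside this module.
if __name__ == "__main__":
    concierge = Concierge()
    crew = concierge.crew("Recommend dinner spots in Austin, TX")
    print(crew.kickoff())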
LangChain
Example Agent using LangChain:
from agentuity import AgentRequest, AgentResponse, AgentContext
from langchain_anthropic import ChatAnthropic
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser


async def run(request: AgentRequest, response: AgentResponse, context: AgentContext):
    prompt = await request.data.text() or "Recommend dinner spots in Austin, TX"

    # Get user intent
    template = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                """You serve as a central hub that routes user requests to the right
                AI agent based on the user's intent. Classify the user's intent and
                select the best agent to handle it: for now, just LocalGuide.
                Respond ONLY with the agent name.""",
            ),
            ("user", "{input}"),
        ]
    )
    llm = ChatAnthropic(model="claude-3-7-sonnet-latest", max_retries=5)
    output_parser = StrOutputParser()
    chain = template | llm | output_parser
    user_intent = await chain.ainvoke({"input": prompt})

    # Route to appropriate agent based on intent
    if user_intent == "LocalGuide":
        return await response.handoff(
            {"name": "LocalGuide"},
            prompt,
        )
    else:
        return response.text("Sorry, I don't know how to help with that.")
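The handoff target above is a second Agentuity Agent named LocalGuide, whose implementation isn't shown in this excerpt. A minimal sketch of what it could look like, assuming it also uses LangChain with Anthropic:

from agentuity import AgentRequest, AgentResponse, AgentContext
from langchain_anthropic import ChatAnthropic
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate


async def run(request: AgentRequest, response: AgentResponse, context: AgentContext):
    # The prompt forwarded by the routing agent is read here as text
    prompt = await request.data.text() or "Recommend dinner spots in Austin, TX"
    template = ChatPromptTemplate.from_messages(
        [
            ("system", "You are a friendly local guide. Give concise, specific recommendations."),
            ("user", "{input}"),
        ]
    )
    chain = template | ChatAnthropic(model="claude-3-7-sonnet-latest") | StrOutputParser()
    return response.text(await chain.ainvoke({"input": prompt}))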
LlamaIndex
Example Agent using LlamaIndex:
from agentuity import AgentRequest, AgentResponse, AgentContext
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.llms.anthropic import Anthropic


async def run(request: AgentRequest, response: AgentResponse, context: AgentContext):
    prompt = await request.data.text() or "Recommend dinner spots in Austin, TX"

    # Get user intent
    agent = AgentWorkflow.from_tools_or_functions(
        [],
        llm=Anthropic(model="claude-3-7-sonnet-latest", max_retries=3),
        system_prompt="""You serve as a central hub that routes user requests to the right
        AI agent based on the user's intent. Classify the user's intent and
        select the best agent to handle it: for now, just LocalGuide.
        Respond ONLY with the agent name.""",
    )
    user_intent = await agent.run(prompt)

    # Route to appropriate agent based on intent
    if str(user_intent) == "LocalGuide":
        return await response.handoff(
            {"name": "LocalGuide"},
            prompt,
        )
    else:
        return response.text("Sorry, I don't know how to help with that.")
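The tools list is empty here because this agent only classifies intent. AgentWorkflow.from_tools_or_functions also accepts plain Python callables as tools; a hedged sketch (the list_cuisines helper is made up for illustration):

from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.llms.anthropic import Anthropic


def list_cuisines() -> list[str]:
    """Return the cuisines this guide knows about (hypothetical example tool)."""
    return ["BBQ", "Tex-Mex", "sushi", "food trucks"]


guide = AgentWorkflow.from_tools_or_functions(
    [list_cuisines],
    llm=Anthropic(model="claude-3-7-sonnet-latest"),
    system_prompt="You are a local guide for Austin, TX. Use your tools when they help.",
)
# Inside an Agentuity run() function you would then: result = await guide.run(prompt)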
Pydantic AI
Example Agent using Pydantic AI:
from agentuity import AgentRequest, AgentResponse, AgentContext
from pydantic_ai import Agent


async def run(request: AgentRequest, response: AgentResponse, context: AgentContext):
    prompt = await request.data.text() or "Recommend dinner spots in Austin, TX"

    # Get user intent
    agent = Agent(
        "claude-3-7-sonnet-latest",
        output_type=str,
        system_prompt=(
            """You serve as a central hub that routes user requests to the right
            AI agent based on the user's intent. Classify the user's intent and
            select the best agent to handle it: for now, just LocalGuide.
            Respond ONLY with the agent name."""
        ),
    )
    intent_result = await agent.run(prompt)

    # Route to appropriate agent based on intent
    if intent_result.output == "LocalGuide":
        return await response.handoff(
            {"name": "LocalGuide"},
            prompt,
        )
    else:
        return response.text("Sorry, I don't know how to help with that.")
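output_type=str keeps the routing example simple, but Pydantic AI's main draw is validated, structured output. A hedged sketch using a Pydantic model instead (the Intent model is made up for illustration):

from pydantic import BaseModel
from pydantic_ai import Agent


class Intent(BaseModel):
    agent_name: str
    reason: str


router = Agent(
    "claude-3-7-sonnet-latest",
    output_type=Intent,
    system_prompt="Classify the user's intent and name the best agent to handle it (for now, just LocalGuide).",
)

# Inside an async Agentuity run() function:
#     result = await router.run(prompt)
#     result.output is then an Intent instance, e.g. Intent(agent_name="LocalGuide", reason="...")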
Need Help?
Join our Community for assistance or just to hang with other humans building agents.
Send us an email at hi@agentuity.com if you'd like to get in touch.
If you haven't already, please sign up for your free account now and start building your first agent!