Code
cookbook/11_models/meta/llama/async_basic.py
"""Basic async example: run a Meta Llama model through an agno Agent.

Demonstrates two ways to get an async response:
- ``agent.arun(...)``            -> returns a ``RunOutput`` you can inspect
- ``agent.aprint_response(...)`` -> streams/prints the reply to the terminal
"""

import asyncio

from agno.agent import Agent, RunOutput  # noqa
from agno.models.meta import Llama

# Agent backed by Meta's hosted Llama model. markdown=True asks the model
# to format its reply as Markdown.
agent = Agent(
    model=Llama(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
    markdown=True,
)

# Guard the entry point so importing this module does not trigger a network
# call to the LLM API; running the script behaves exactly as before.
if __name__ == "__main__":
    # Alternative: capture the response in a variable instead of printing.
    # run: RunOutput = asyncio.run(agent.arun("Share a 2 sentence horror story"))
    # print(run.content)

    # Print the response in the terminal.
    asyncio.run(agent.aprint_response("Share a 2 sentence horror story"))
Was this page helpful?