Use streaming responses from OpenRouter’s Responses API.
Leverage the OpenRouter API for real-time response streaming across any model. This is useful for low-latency chat user interfaces and dynamic agentic workflows.
"""Stream a response from OpenRouter's Responses API via an Agno Agent."""

from agno.agent import Agent
from agno.models.openrouter import OpenRouterResponses

# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# OpenRouterResponses targets OpenRouter's Responses API; reasoning is
# enabled so the model emits its reasoning trace alongside the answer.
agent = Agent(
    model=OpenRouterResponses(id="openai/gpt-oss-20b", reasoning={"enabled": True}),
    markdown=True,
)


def main() -> None:
    """Run the streaming demo: print the response as tokens arrive."""
    # stream=True prints incrementally instead of waiting for the full
    # completion — the low-latency behavior this example demonstrates.
    agent.print_response("Write a short poem about the moon", stream=True)


# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
# Bug fix: originally the print_response call ran at import time while this
# guard held only `pass`. Execution now happens only when run as a script.
if __name__ == "__main__":
    main()