Source code for reminix.adapters.llamaindex.agent
"""
LlamaIndex Agent Adapter
Wraps a LlamaIndex Agent (ReActAgent, OpenAIAgent, etc.) for use with the Reminix runtime.
Compatibility:
llama-index-core >= 0.14.0
"""
from __future__ import annotations
from typing import Any
from ..protocols import LlamaIndexAgentProtocol
from reminix.runtime import Agent
def from_agent(
    llama_agent: LlamaIndexAgentProtocol,
    *,
    name: str,
    metadata: dict[str, Any] | None = None,
) -> Agent:
"""
Create a Reminix Agent from a LlamaIndex Agent.
Works with ReActAgent, OpenAIAgent, and other LlamaIndex agent types.
Args:
llama_agent: A LlamaIndex Agent instance (ReActAgent, OpenAIAgent, etc.).
name: Name for the Reminix agent.
metadata: Optional metadata for the agent.
Returns:
A Reminix Agent that wraps the LlamaIndex agent.
Example::
from llama_index.core.agent import ReActAgent
from llama_index.core.tools import FunctionTool
from llama_index.llms.openai import OpenAI
from reminix.adapters.llamaindex import from_agent
from reminix.runtime import serve
# Define tools
def multiply(a: int, b: int) -> int:
return a * b
multiply_tool = FunctionTool.from_defaults(fn=multiply)
# Create agent
llm = OpenAI(model="gpt-4o")
react_agent = ReActAgent.from_tools([multiply_tool], llm=llm)
# Wrap and serve
agent = from_agent(react_agent, name="calculator")
serve(agent)
"""
    agent = Agent(
        name,
        metadata={
            "framework": "llamaindex",
            "adapter": "agent",
            **(metadata or {}),
        },
    )
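    # Caller-supplied metadata is unpacked last, so it can override the
    # "framework" and "adapter" defaults on key collisions.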

    @agent.invoke  # type: ignore[arg-type]
    async def handle_invoke(input_data: dict[str, Any], ctx: dict[str, Any]) -> dict[str, Any]:
        """Non-streaming invoke via LlamaIndex Agent."""
        message = _extract_message(input_data)
        response = await llama_agent.achat(message)
        return {"output": str(response)}

    @agent.invoke_stream  # type: ignore[arg-type]
    async def handle_invoke_stream(input_data: dict[str, Any], ctx: dict[str, Any]):
        """Streaming invoke via LlamaIndex Agent."""
        message = _extract_message(input_data)
        response = await llama_agent.astream_chat(message)  # type: ignore[attr-defined]
        async for token in response.async_response_gen():
            yield {"chunk": token}

    @agent.chat  # type: ignore[arg-type]
    async def handle_chat(messages: list[dict[str, Any]], ctx: dict[str, Any]) -> dict[str, Any]:
        """Non-streaming chat via LlamaIndex Agent."""
        # Reset agent memory and send only the most recent user message;
        # earlier turns in `messages` are not replayed to the agent.
        llama_agent.reset()  # type: ignore[attr-defined]
        last_message = ""
        for msg in messages:
            if msg.get("role") == "user":
                last_message = msg.get("content", "")
        response = await llama_agent.achat(last_message)
        return {"message": {"role": "assistant", "content": str(response)}}

    @agent.chat_stream  # type: ignore[arg-type]
    async def handle_chat_stream(messages: list[dict[str, Any]], ctx: dict[str, Any]):
        """Streaming chat via LlamaIndex Agent."""
        # Same last-user-message semantics as handle_chat, streamed token by token.
        llama_agent.reset()  # type: ignore[attr-defined]
        last_message = ""
        for msg in messages:
            if msg.get("role") == "user":
                last_message = msg.get("content", "")
        response = await llama_agent.astream_chat(last_message)  # type: ignore[attr-defined]
        async for token in response.async_response_gen():
            yield {"chunk": token}

    return agent


def _extract_message(input_data: dict[str, Any]) -> str:
    """Extract the message string from input data.

    Checks, in order: ``message``, ``input``, ``query``, ``prompt``; falls
    back to stringifying the whole payload.
    """
    for key in ("message", "input", "query", "prompt"):
        if key in input_data:
            return str(input_data[key])
    return str(input_data)
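
# Illustrative behavior of the extraction fallback (hypothetical payloads):
#   _extract_message({"message": "hi"})   -> "hi"
#   _extract_message({"query": "2 * 3?"}) -> "2 * 3?"
#   _extract_message({"foo": "bar"})      -> "{'foo': 'bar'}"  (whole dict stringified)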