Documentation Index
Fetch the complete documentation index at: https://docs.galileo.ai/llms.txt
Use this file to discover all available pages before exploring further.
GalileoAsyncCallback
Async Langchain callback handler for logging traces to the Galileo platform.
Arguments
_handler (GalileoAsyncBaseHandler): The async handler for managing the trace.
on_agent_finish
async def on_agent_finish(self,
finish: AgentFinish,
*,
run_id: UUID,
**kwargs: Any) -> Any
Langchain callback when an agent finishes.
on_chain_end
async def on_chain_end(self,
outputs: dict[str, Any],
*,
run_id: UUID,
parent_run_id: Optional[UUID]=None,
**kwargs: Any) -> Any
Langchain callback when a chain ends.
on_chain_error
async def on_chain_error(self,
error: Exception,
*,
run_id: UUID,
parent_run_id: Optional[UUID]=None,
**kwargs: Any) -> Any
Langchain callback when a chain errors.
on_chain_start
async def on_chain_start(self,
serialized: dict[str, Any],
inputs: dict[str, Any],
*,
run_id: UUID,
parent_run_id: Optional[UUID]=None,
tags: Optional[list[str]]=None,
**kwargs: Any) -> Any
Langchain callback when a chain starts.
on_chat_model_start
async def on_chat_model_start(self,
serialized: dict[str, Any],
messages: list[list[BaseMessage]],
*,
run_id: UUID,
parent_run_id: Optional[UUID]=None,
tags: Optional[list[str]]=None,
metadata: Optional[dict[str, Any]]=None,
**kwargs: Any) -> Any
Langchain callback when a chat model starts.
on_llm_end
async def on_llm_end(self,
response: LLMResult,
*,
run_id: UUID,
parent_run_id: Optional[UUID]=None,
**kwargs: Any) -> Any
Langchain callback when an LLM node ends.
on_llm_error
async def on_llm_error(self,
error: Exception,
*,
run_id: UUID,
parent_run_id: Optional[UUID]=None,
**kwargs: Any) -> Any
Langchain callback when an LLM errors.
on_llm_new_token
async def on_llm_new_token(self, token: str, *, run_id: UUID, **kwargs: Any) -> Any
Langchain callback when an LLM node generates a new token.
on_llm_start
async def on_llm_start(self,
serialized: dict[str, Any],
prompts: list[str],
*,
run_id: UUID,
parent_run_id: Optional[UUID]=None,
tags: Optional[list[str]]=None,
metadata: Optional[dict[str, Any]]=None,
**kwargs: Any) -> Any
Langchain callback when an LLM node starts.
Note: This callback is only used for non-chat models.
on_retriever_end
async def on_retriever_end(self,
documents: list[Document],
*,
run_id: UUID,
parent_run_id: Optional[UUID]=None,
**kwargs: Any) -> Any
Langchain callback when a retriever node ends.
on_retriever_error
async def on_retriever_error(self,
error: Exception,
*,
run_id: UUID,
parent_run_id: Optional[UUID]=None,
**kwargs: Any) -> Any
Langchain callback when a retriever errors.
on_retriever_start
async def on_retriever_start(self,
serialized: dict[str, Any],
query: str,
*,
run_id: UUID,
parent_run_id: Optional[UUID]=None,
tags: Optional[list[str]]=None,
metadata: Optional[dict[str, Any]]=None,
**kwargs: Any) -> Any
Langchain callback when a retriever node starts.
on_tool_end
async def on_tool_end(self,
output: Any,
*,
run_id: UUID,
parent_run_id: Optional[UUID]=None,
**kwargs: Any) -> Any
Langchain callback when a tool node ends.
on_tool_error
async def on_tool_error(self,
error: Exception,
*,
run_id: UUID,
parent_run_id: Optional[UUID]=None,
**kwargs: Any) -> Any
Langchain callback when a tool errors.
on_tool_start
async def on_tool_start(self,
serialized: dict[str, Any],
input_str: str,
*,
run_id: UUID,
parent_run_id: Optional[UUID]=None,
tags: Optional[list[str]]=None,
metadata: Optional[dict[str, Any]]=None,
**kwargs: Any) -> Any
Langchain callback when a tool node starts.