AgentTokenBufferMemory#

class langchain.agents.openai_functions_agent.agent_token_buffer_memory.AgentTokenBufferMemory[source]#

Bases: BaseChatMemory

Memory used to save agent output AND intermediate steps.

Parameters:
  • human_prefix – Prefix for human messages. Default is “Human”.

  • ai_prefix – Prefix for AI messages. Default is “AI”.

  • llm – Language model.

  • memory_key – Key to save memory under. Default is “history”.

  • max_token_limit – Maximum number of tokens to keep in the buffer. Once the buffer exceeds this many tokens, the oldest messages will be pruned. Default is 12000.

  • return_messages – Whether to return messages. Default is True.

  • output_key – Key to save output under. Default is “output”.

  • intermediate_steps_key – Key to save intermediate steps under. Default is “intermediate_steps”.

  • format_as_tools – Whether to format intermediate steps as tool messages rather than OpenAI function messages. Default is False.

param ai_prefix: str = 'AI'#
param chat_memory: BaseChatMessageHistory [Optional]#
param format_as_tools: bool = False#
param human_prefix: str = 'Human'#
param input_key: str | None = None#
param intermediate_steps_key: str = 'intermediate_steps'#
param llm: BaseLanguageModel [Required]#
param max_token_limit: int = 12000#

The max number of tokens to keep in the buffer. Once the buffer exceeds this many tokens, the oldest messages will be pruned.

param memory_key: str = 'history'#
param output_key: str = 'output'#
param return_messages: bool = True#
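
Example: a minimal construction sketch. The ChatOpenAI class, its import path, and the model name are assumptions used only for illustration; any BaseLanguageModel works.

from langchain_openai import ChatOpenAI
from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
    AgentTokenBufferMemory,
)

llm = ChatOpenAI(model="gpt-4o-mini")  # assumed chat model; used to count tokens

memory = AgentTokenBufferMemory(
    llm=llm,                  # required: counts tokens when pruning the buffer
    memory_key="history",     # key the buffer is returned under
    max_token_limit=12000,    # prune oldest messages beyond this many tokens
    return_messages=True,     # return BaseMessage objects rather than a string
)
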
async aclear() → None#

Clear memory contents.

Return type:

None

async aload_memory_variables(inputs: Dict[str, Any]) → Dict[str, Any]#

Asynchronously return key-value pairs given the text input to the chain.

Parameters:

inputs (Dict[str, Any]) – The inputs to the chain.

Returns:

A dictionary of key-value pairs.

Return type:

Dict[str, Any]

async asave_context(inputs: Dict[str, Any], outputs: Dict[str, str]) → None#

Save context from this conversation to the buffer.

Parameters:
  • inputs (Dict[str, Any])

  • outputs (Dict[str, str])

Return type:

None
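
Example: a hedged sketch of the async round trip, continuing the memory object from the construction sketch above. The question and answer strings are placeholders, and the "input"/"output" keys follow the defaults documented on this page.

import asyncio

async def remember() -> None:
    # Store one exchange under the default input/output keys.
    await memory.asave_context(
        {"input": "What is LangChain?"},
        {"output": "A framework for building applications with LLMs."},
    )
    # Read the buffer back asynchronously; keyed by memory_key ("history").
    variables = await memory.aload_memory_variables({})
    print(variables["history"])

asyncio.run(remember())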

clear() → None#

Clear memory contents.

Return type:

None

load_memory_variables(inputs: Dict[str, Any]) → Dict[str, Any][source]#

Return history buffer.

Parameters:

inputs (Dict[str, Any]) – Inputs to the agent.

Returns:

A dictionary with the history buffer.

Return type:

Dict[str, Any]
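
Example: a small usage sketch, continuing the memory object from above; the returned dict is keyed by memory_key ("history" by default).

variables = memory.load_memory_variables({})
messages = variables["history"]  # a list of BaseMessage because return_messages=True
for message in messages:
    print(type(message).__name__, message.content)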

save_context(inputs: Dict[str, Any], outputs: Dict[str, Any]) → None[source]#

Save context from this conversation to the buffer, pruning the oldest messages once the token limit is exceeded.

Parameters:
  • inputs (Dict[str, Any]) – Inputs to the agent.

  • outputs (Dict[str, Any]) – Outputs from the agent.

Return type:

None
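
Example: a hedged sketch of saving one full agent turn, continuing the memory object from above. The add tool, its arguments, and the fabricated step are hypothetical; in practice the (AgentAction, observation) pairs passed under "intermediate_steps" come from an agent run, and the memory writes them into the buffer alongside the input and output.

from langchain_core.agents import AgentActionMessageLog
from langchain_core.messages import AIMessage

# A hypothetical function-call step: the agent "called" an add tool and observed "4".
fake_step = (
    AgentActionMessageLog(
        tool="add",
        tool_input={"a": 2, "b": 2},
        log="Calling add with {'a': 2, 'b': 2}",
        message_log=[
            AIMessage(
                content="",
                additional_kwargs={
                    "function_call": {"name": "add", "arguments": '{"a": 2, "b": 2}'}
                },
            )
        ],
    ),
    "4",  # the tool's observation
)

memory.save_context(
    {"input": "What is 2 + 2?"},
    {"output": "2 + 2 = 4", "intermediate_steps": [fake_step]},
)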

property buffer: List[BaseMessage]#

Buffer of memory, as a list of messages.
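
Example: unlike load_memory_variables(), the property returns the raw message list directly.

for message in memory.buffer:
    print(message.type, message.content)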