import { createAgent, trimMessages, type AgentState } from "langchain";
import { MemorySaver } from "@langchain/langgraph";

// Pre-model hook: runs before every LLM call and trims the conversation so the
// prompt stays within budget, keeping only the most recent messages.
const stateModifier = async (state: AgentState) => {
  const trimmed = await trimMessages(state.messages, {
    strategy: "last",
    maxTokens: 384,
    startOn: "human",
    endOn: ["human", "tool"],
    // Counts each message as one "token" — a cheap stand-in for a real counter.
    tokenCounter: (msgs) => msgs.length,
  });
  return { messages: trimmed };
};

// Persists conversation state between invocations.
const checkpointer = new MemorySaver();

const agent = createAgent({
  model: "openai:gpt-5",
  tools: [],
  preModelHook: stateModifier,
  checkpointer,
});
[['human', "hi! I'm bob"]][['human', "hi! I'm bob"], ['ai', 'Hi Bob! How are you doing today? Is there anything I can help you with?']][['human', "hi! I'm bob"], ['ai', 'Hi Bob! How are you doing today? Is there anything I can help you with?'], ['human', "what's my name?"]][['human', "hi! I'm bob"], ['ai', 'Hi Bob! How are you doing today? Is there anything I can help you with?'], ['human', "what's my name?"], ['ai', 'Your name is Bob.']][['human', "what's my name?"], ['ai', 'Your name is Bob.']]
import { createAgent, summarizationMiddleware } from "langchain";
import { MemorySaver } from "@langchain/langgraph";

// Checkpointer keeps per-thread conversation history across invocations.
const checkpointer = new MemorySaver();

const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [],
  middleware: [
    summarizationMiddleware({
      model: "openai:gpt-4o-mini", // a cheaper model performs the summarization
      maxTokensBeforeSummary: 4000, // summarize once history exceeds this budget
      messagesToKeep: 20, // most recent messages are kept verbatim
    }),
  ],
  checkpointer,
});

// All invocations share thread "1", so they share (summarized) history.
const config = { configurable: { thread_id: "1" } };

await agent.invoke({ messages: "hi, my name is bob" }, config);
await agent.invoke({ messages: "write a short poem about cats" }, config);
await agent.invoke({ messages: "now do the same but for dogs" }, config);

const finalResponse = await agent.invoke({ messages: "what's my name?" }, config);
console.log(finalResponse.messages.at(-1)?.content);
// Your name is Bob!
import * as z from "zod";import { createAgent, tool, SystemMessage } from "langchain";const contextSchema = z.object({ userName: z.string(),});const getWeather = tool( async ({ city }, config) => { return `The weather in ${city} is always sunny!`; }, { name: "get_weather", description: "Get user info", schema: z.object({ city: z.string(), }), });const agent = createAgent({ model: "openai:gpt-5-nano", tools: [getWeather], contextSchema, prompt: (state, config) => { return [ new SystemMessage( `You are a helpful assistant. Address the user as ${config.context?.userName}.` ), ...state.messages, },});const result = await agent.invoke( { messages: [{ role: "user", content: "What is the weather in SF?" }], }, { context: { userName: "John Smith", }, });for (const message of result.messages) { console.log(message);}/** * HumanMessage { * "content": "What is the weather in SF?", * // ... * } * AIMessage { * // ... * "tool_calls": [ * { * "name": "get_weather", * "args": { * "city": "San Francisco" * }, * "type": "tool_call", * "id": "call_tCidbv0apTpQpEWb3O2zQ4Yx" * } * ], * // ... * } * ToolMessage { * "content": "The weather in San Francisco is always sunny!", * "tool_call_id": "call_tCidbv0apTpQpEWb3O2zQ4Yx" * // ... * } * AIMessage { * "content": "John Smith, here's the latest: The weather in San Francisco is always sunny!\n\nIf you'd like more details (temperature, wind, humidity) or a forecast for the next few days, I can pull that up. What would you like?", * // ... * } */