LangChain — Building LLM Applications
LangChain is the most popular framework for building LLM-powered applications. It provides abstractions for chaining prompts, managing conversation memory, building agents that use tools, and connecting LLMs to data sources and APIs.
LangChain Chains, Memory & Agents
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.memory import ConversationBufferWindowMemory
from langchain.chains import ConversationalRetrievalChain
from langchain_community.vectorstores import Chroma
from langchain.tools import tool
from langchain.agents import AgentExecutor, create_openai_tools_agent
# Shared LLM client used by every example below; temperature=0 keeps
# responses as deterministic as the model allows.
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# 1. LCEL (LangChain Expression Language) — compose chains
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
# Chain: Prompt → LLM → Parse
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a {role}. Answer in {language}."),
    ("human", "{question}"),
])

# BUG FIX: the original chain began with
#   {"role": RunnablePassthrough(), "language": RunnablePassthrough(), ...}
# A dict of runnables is a RunnableParallel, and every branch receives the
# *entire* input — so each prompt variable would have been bound to the whole
# input dict rather than to its own field. ChatPromptTemplate already accepts
# the variable dict directly, so no mapping step is needed.
chain = prompt | llm | StrOutputParser()
# chain.invoke({"role": "Python expert", "language": "English", "question": "What is a generator?"})
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# 2. CONVERSATION MEMORY
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Sliding-window memory: only the 5 most recent exchanges are kept, exposed
# to the prompt under the "chat_history" key as message objects.
memory = ConversationBufferWindowMemory(
    k=5,
    return_messages=True,
    memory_key="chat_history",
)

# Prompt that splices the remembered history between the system message and
# the newest user input.
_history_messages = [
    ("system", "You are a helpful assistant. Keep track of the conversation context."),
    MessagesPlaceholder(variable_name="chat_history"),
    ("human", "{input}"),
]
chat_prompt = ChatPromptTemplate.from_messages(_history_messages)

from langchain.chains import LLMChain

# LLMChain ties prompt, model, and memory together into a stateful chat chain.
chat_chain = LLMChain(
    llm=llm,
    prompt=chat_prompt,
    memory=memory,
    verbose=False,
)
# Multi-turn conversation:
# chat_chain.predict(input="My name is Alice.")
# chat_chain.predict(input="What's my name?")  # → "Your name is Alice."
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# 3. AGENTS — LLM decides which tools to use
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
@tool
def calculate(expression: str) -> str:
    """Evaluate a mathematical expression. Input should be a valid Python math expression.

    Returns the result as a string, or an "Error: ..." string if evaluation fails.
    """
    import math

    # Expose the whole public math namespace (sin, log, pi, ...) plus a few
    # safe builtins — the original offered only `math` and `sqrt`, so inputs
    # like "sin(2)" or "abs(-1)" failed despite the docstring's promise of
    # "a valid Python math expression". `math` and `sqrt` remain available,
    # so existing behavior is preserved.
    namespace = {name: obj for name, obj in vars(math).items() if not name.startswith("_")}
    namespace.update({"math": math, "abs": abs, "round": round, "min": min, "max": max})
    try:
        # NOTE(security): eval() on LLM-generated input. Emptying __builtins__
        # blocks `import`, `open`, etc., but it is not a hardened sandbox —
        # for production, prefer a dedicated expression parser.
        return str(eval(expression, {"__builtins__": {}}, namespace))
    except Exception as e:
        return f"Error: {e}"
@tool
def get_stock_price(ticker: str) -> str:
    """Get the current stock price for a ticker symbol."""
    # In production: call a real financial API
    quote_table = {
        "AAPL": 182.50,
        "GOOGL": 143.20,
        "TSLA": 248.70,
        "NVDA": 875.40,
    }
    # Tickers are matched case-insensitively; unknown symbols yield "$N/A".
    price = quote_table.get(ticker.upper(), "N/A")
    return f"${price}"
# Register both tools and build an OpenAI-tools agent around them.
tools = [calculate, get_stock_price]

# "agent_scratchpad" is where the agent records its intermediate tool calls
# and their results between reasoning steps.
agent_prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful financial assistant. Use tools to answer accurately."),
    ("human", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
])

agent = create_openai_tools_agent(llm, tools, agent_prompt)

# Cap the reasoning loop at 5 iterations; verbose prints each step.
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=True,
    max_iterations=5,
)

# NOTE: this runs at import time and makes a real API call.
result = agent_executor.invoke(
    {"input": "If I bought 15 shares of NVDA, what's the total value? Also, what is 15 * 875.40?"}
)
print(result["output"])
Tip
Practice building LLM applications with LangChain in small, isolated examples before integrating them into larger projects. Breaking concepts into small experiments builds genuine understanding faster than reading alone.
GPT-4 = strong reasoning. Claude = safety + long context. Gemini = multimodal. Llama = local/open.
Practice Task
Note
Practice Task — (1) Write a working LangChain example (a chain, a memory-backed conversation, or a tool-using agent) from scratch without looking at notes. (2) Modify it to handle an edge case (empty input, null value, or error state). (3) Share your solution in the Priygop community for feedback.
Quick Quiz
Common Mistake
Warning
A common mistake when building LLM applications with LangChain is skipping edge-case testing — empty inputs, null values, and unexpected data types. Always validate boundary conditions to write robust, production-ready AI code.