LangChain is the most popular framework for building applications powered by large language models. It provides a standard interface for chains, agents, and memory, making it easy to build complex AI applications. Since the Morpheus Inference API is fully OpenAI-compatible, you can use it with LangChain by simply configuring the OpenAI provider with a custom base URL.
import os

from langchain_openai import ChatOpenAI

# Point the OpenAI-compatible client at the Morpheus endpoint; the API key
# is read from the environment rather than hard-coded.
llm = ChatOpenAI(
    model="llama-3.3-70b",
    api_key=os.getenv("MORPHEUS_API_KEY"),
    base_url="https://api.mor.org/api/v1",
)

# One-shot call: invoke() returns a message object whose text is .content.
response = llm.invoke("What is the capital of France?")
print(response.content)
import os

from langchain_openai import ChatOpenAI

# Streaming uses the same OpenAI-compatible configuration, with
# streaming=True so tokens are delivered incrementally.
# (Fix: the original snippet called os.getenv without importing os.)
llm = ChatOpenAI(
    model="llama-3.3-70b",
    api_key=os.getenv("MORPHEUS_API_KEY"),
    base_url="https://api.mor.org/api/v1",
    streaming=True,
)

# Print each chunk as it arrives; flush so output renders in real time.
for chunk in llm.stream("Write a short poem about AI"):
    print(chunk.content, end="", flush=True)
import os

from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI

# Morpheus-backed LLM via the OpenAI-compatible endpoint.
# (Fix: the original snippet called os.getenv without importing os.)
llm = ChatOpenAI(
    model="llama-3.3-70b",
    api_key=os.getenv("MORPHEUS_API_KEY"),
    base_url="https://api.mor.org/api/v1",
)


@tool
def get_weather(city: str) -> str:
    """Get the current weather for a city."""
    # Your weather API implementation
    return f"The weather in {city} is sunny, 72°F"


@tool
def calculate(expression: str) -> str:
    """Evaluate a mathematical expression."""
    # WARNING: eval() on model-supplied text is a code-injection risk.
    # Guard it by rejecting anything beyond basic arithmetic characters;
    # for production, use a dedicated expression parser instead of eval.
    if not all(ch in "0123456789+-*/(). %" for ch in expression):
        return "Error: only basic arithmetic expressions are supported"
    return str(eval(expression))


tools = [get_weather, calculate]

# The agent_scratchpad placeholder is where intermediate tool calls go.
prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful assistant with access to tools."),
    ("user", "{input}"),
    ("placeholder", "{agent_scratchpad}"),
])

agent = create_tool_calling_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

response = agent_executor.invoke({
    "input": "What's the weather in Tokyo and what is 15 * 23?"
})
print(response["output"])
import { ChatOpenAI } from "@langchain/openai";

// Same pattern in JavaScript: route the OpenAI client through the
// Morpheus base URL via the `configuration` option.
const llm = new ChatOpenAI({
  model: "llama-3.3-70b",
  apiKey: process.env.MORPHEUS_API_KEY,
  configuration: {
    baseURL: "https://api.mor.org/api/v1",
  },
});

const response = await llm.invoke("What is the capital of France?");
console.log(response.content);