LangChain using Gemini 2.5 Flash

We can use LangChain to build our MCP server.


import os
from typing import Annotated, TypedDict, List
# CHANGE 1: Import Google GenAI instead of OpenAI
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.tools import tool
from langgraph.graph import StateGraph, END
from langgraph.graph.message import add_messages

# --- SETUP ---
# CHANGE 2: Use GOOGLE_API_KEY instead of OPENAI_API_KEY
# NOTE(review): never commit a real API key in source code — load it from the
# environment or a secrets manager instead; this placeholder is for demo only.
os.environ["GOOGLE_API_KEY"] = "your-google-api-key-here"

# --- 1. DEFINE TOOLS ---
@tool
def get_stock_price(symbol: str) -> str:
    """Get the current stock price for a given ticker symbol."""
    ticker = symbol.upper()
    # Stubbed quote for the demo: AAPL gets a fixed price, everything else a default.
    price = 150.00 if ticker == "AAPL" else 100.00
    return f"The current price of {ticker} is ${price}"

# Registry of every tool the agent may call; bound to the model below.
tools = [get_stock_price]

# CHANGE 3: Initialize Gemini Model
# 'gemini-2.5-flash' is fast and cheap for testing
# bind_tools exposes the tool schemas to the model so it can emit tool_calls.
llm = ChatGoogleGenerativeAI(
    model="gemini-2.5-flash",
    google_api_key=os.environ["GOOGLE_API_KEY"]
).bind_tools(tools)

# --- 2. DEFINE STATE ---
class State(TypedDict):
    # Conversation history; the add_messages reducer appends new messages to
    # the list instead of replacing it on each graph step.
    messages: Annotated[List, add_messages]

# --- 3. DEFINE NODES ---
def model_node(state: State):
    """Ask the LLM for its next move given the conversation so far."""
    reply = llm.invoke(state["messages"])
    # add_messages appends the reply onto the running history.
    return {"messages": [reply]}

def tool_node(state: State):
    """Execute any tools requested by the last LLM message.

    Looks each requested tool up by name in `tools` instead of hard-coding
    `get_stock_price`, so adding a second tool to the registry requires no
    change here. Returns tool-result messages keyed by tool_call_id so the
    model can match answers to its requests.
    """
    last_message = state["messages"][-1]

    # LangChain standardizes tool_calls across providers (OpenAI, Gemini, etc.)
    if not last_message.tool_calls:
        return {"messages": []}

    # Dispatch table: tool name -> tool object (fixes the original bug of
    # always invoking get_stock_price regardless of which tool was requested).
    tools_by_name = {t.name: t for t in tools}

    outputs = []
    for tool_call in last_message.tool_calls:
        # Invoke the actual Python function behind the requested tool.
        tool_result = tools_by_name[tool_call["name"]].invoke(tool_call["args"])
        outputs.append({
            "role": "tool",
            "content": tool_result,
            "tool_call_id": tool_call["id"],
        })
    return {"messages": outputs}

# --- 4. DEFINE EDGES ---
def should_continue(state: State):
    """Check if the LLM wants to call a tool or end."""
    last_message = state["messages"][-1]
    if last_message.tool_calls:
        return "tools"
    return END

# --- 5. BUILD THE GRAPH ---
workflow = StateGraph(State)

# Two nodes: "agent" (LLM reasoning) and "tools" (tool execution).
workflow.add_node("agent", model_node)
workflow.add_node("tools", tool_node)

# Every run begins with the LLM deciding what to do.
workflow.set_entry_point("agent")

# After the agent speaks, either run the requested tools or stop.
workflow.add_conditional_edges(
    "agent",
    should_continue,
    {
        "tools": "tools",
        END: END
    }
)
# Tool results always loop back to the agent so it can interpret them.
workflow.add_edge("tools", "agent")

app = workflow.compile()

# --- 6. RUN THE AGENT ---
if __name__ == "__main__":
    print("--- Starting Gemini Agent ---")
    inputs = {"messages": [("user", "What is the price of AAPL?")]}

    # Stream once so each node's output is shown as the graph executes.
    for event in app.stream(inputs):
        for node_name, update in event.items():
            print(f"--- Node: {node_name} ---")
            latest = update["messages"][-1]
            # Tool-result dicts have no .content attribute, so fall back safely.
            print(getattr(latest, 'content', "Tool Executed"))

    print("\n--- Final Answer ---")
    final_state = app.invoke(inputs)
    print(final_state["messages"][-1].content)

And the results would look something like this:


Compare the code above with how Agent Engine would initialize the model and bind the tools:

from vertexai import agent_engines

agent = agent_engines.LanggraphAgent(
    model="gemini-2.0-flash",
    tools=[get_exchange_rate],
    model_kwargs={
        "temperature": 0.28,
        "max_output_tokens": 1000,
        "top_p": 0.95,
    },
)

Troubleshooting

Sometimes, if the model is not supported by your account, you will get a 429 error saying either that the model was not found or that you have reached your rate limit. In the code above, you can see that I am using "gemini-2.5-flash", and if the model is not available to you for some reason, you will get an error. You can easily check here whether it is due to a rate limit. This also gives you an idea of which models are allowed:

https://aistudio.google.com/app/rate-limit?timeRange=last-28-days.





Comments

Popular posts from this blog

vllm : Failed to infer device type

NodeJS: Error: spawn EINVAL in window for node version 20.20 and 18.20

android studio kotlin source is null error