import os

from dotenv import load_dotenv
from langchain_groq import ChatGroq
from langchain_tavily import TavilySearch
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.prebuilt import create_react_agent
from loguru import logger

load_dotenv()
# =======================
# 1. LLM MODEL (Groq) - Optimized for Low Latency
# =======================
model = ChatGroq(
    model="openai/gpt-oss-20b",  # small open model served by Groq for low latency
    max_tokens=256,              # reduced from 512 for faster responses
    api_key=os.getenv("GROQ_API_KEY"),
    temperature=0.7,
)
# =======================
# 2. TAVILY SEARCH TOOL - Optimized for Speed
# =======================
tavily_tool = TavilySearch(
    max_results=2,  # reduced from 5 for faster responses
    topic="general",
    api_key=os.getenv("TAVILY_API_KEY"),
)
tools = [tavily_tool]  # Tavily web search is the agent's only tool
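# A minimal sketch of exercising the search tool on its own (assumes a valid
# TAVILY_API_KEY in the environment). TavilySearch is a standard LangChain
# tool, so it can be invoked directly before wiring it into the agent:
#
#     results = tavily_tool.invoke({"query": "latest LangGraph release"})
#     logger.debug(results)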
# =======================
# 3. SYSTEM PROMPT - Optimized for Speed
# =======================
system_prompt = """
You are Samantha, a helpful assistant. Use Tavily for factual or current information.
Keep responses brief and conversational for audio playback.
"""
# =======================
# 4. MEMORY
# =======================
memory = InMemorySaver()
# =======================
# 5. BUILD THE AGENT
# =======================
agent = create_react_agent(
    model=model,
    tools=tools,
    prompt=system_prompt,
    checkpointer=memory,
)
agent_config = {
    "configurable": {
        "thread_id": "default_user",  # conversation thread tracked by the checkpointer
    }
}
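# =======================
# 6. EXAMPLE USAGE (minimal sketch)
# =======================
# Assumes valid GROQ_API_KEY and TAVILY_API_KEY in the environment; the
# thread_id in agent_config lets InMemorySaver keep conversation history
# across invocations for the same user.
if __name__ == "__main__":
    result = agent.invoke(
        {"messages": [{"role": "user", "content": "What's the weather in Paris today?"}]},
        config=agent_config,
    )
    logger.info(result["messages"][-1].content)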