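# -------------------------------------------------------------------
# app.py -- personal-profile chatbot. Earlier iterations are kept
# below, commented out, as history; the active code is Version 5.
# -------------------------------------------------------------------
# ---- Version 1: original OpenAI implementation (gpt-4o-mini) ----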
# from dotenv import load_dotenv
# from openai import OpenAI
# import json
# import os
# import requests
# from pypdf import PdfReader
# import gradio as gr
#
# load_dotenv(override=True)
#
# def push(text):
#     requests.post(
#         "https://api.pushover.net/1/messages.json",
#         data={
#             "token": os.getenv("PUSHOVER_TOKEN"),
#             "user": os.getenv("PUSHOVER_USER"),
#             "message": text,
#         }
#     )
#
# def record_user_details(email, name="Name not provided", notes="not provided"):
#     push(f"Recording {name} with email {email} and notes {notes}")
#     return {"recorded": "ok"}
#
# def record_unknown_question(question):
#     push(f"Recording {question}")
#     return {"recorded": "ok"}
#
# record_user_details_json = {
#     "name": "record_user_details",
#     "description": "Use this tool to record that a user is interested in being in touch and provided an email address",
#     "parameters": {
#         "type": "object",
#         "properties": {
#             "email": {
#                 "type": "string",
#                 "description": "The email address of this user"
#             },
#             "name": {
#                 "type": "string",
#                 "description": "The user's name, if they provided it"
#             },
#             "notes": {
#                 "type": "string",
#                 "description": "Any additional information about the conversation that's worth recording to give context"
#             }
#         },
#         "required": ["email"],
#         "additionalProperties": False
#     }
# }
#
# record_unknown_question_json = {
#     "name": "record_unknown_question",
#     "description": "Always use this tool to record any question that couldn't be answered as you didn't know the answer",
#     "parameters": {
#         "type": "object",
#         "properties": {
#             "question": {
#                 "type": "string",
#                 "description": "The question that couldn't be answered"
#             },
#         },
#         "required": ["question"],
#         "additionalProperties": False
#     }
# }
#
# tools = [{"type": "function", "function": record_user_details_json},
#          {"type": "function", "function": record_unknown_question_json}]
#
# class Me:
#
#     def __init__(self):
#         self.openai = OpenAI()
#         self.name = "Ed Donner"
#         reader = PdfReader("me/linkedin.pdf")
#         self.linkedin = ""
#         for page in reader.pages:
#             text = page.extract_text()
#             if text:
#                 self.linkedin += text
#         with open("me/summary.txt", "r", encoding="utf-8") as f:
#             self.summary = f.read()
#
#     def handle_tool_call(self, tool_calls):
#         results = []
#         for tool_call in tool_calls:
#             tool_name = tool_call.function.name
#             arguments = json.loads(tool_call.function.arguments)
#             print(f"Tool called: {tool_name}", flush=True)
#             tool = globals().get(tool_name)
#             result = tool(**arguments) if tool else {}
#             results.append({"role": "tool", "content": json.dumps(result), "tool_call_id": tool_call.id})
#         return results
#
#     def system_prompt(self):
#         system_prompt = f"You are acting as {self.name}. You are answering questions on {self.name}'s website, \
# particularly questions related to {self.name}'s career, background, skills and experience. \
# Your responsibility is to represent {self.name} for interactions on the website as faithfully as possible. \
# You are given a summary of {self.name}'s background and LinkedIn profile which you can use to answer questions. \
# Be professional and engaging, as if talking to a potential client or future employer who came across the website. \
# If you don't know the answer to any question, use your record_unknown_question tool to record the question that you couldn't answer, even if it's about something trivial or unrelated to career. \
# If the user is engaging in discussion, try to steer them towards getting in touch via email; ask for their email and record it using your record_user_details tool. "
#         system_prompt += f"\n\n## Summary:\n{self.summary}\n\n## LinkedIn Profile:\n{self.linkedin}\n\n"
#         system_prompt += f"With this context, please chat with the user, always staying in character as {self.name}."
#         return system_prompt
#
#     def chat(self, message, history):
#         messages = [{"role": "system", "content": self.system_prompt()}] + history + [{"role": "user", "content": message}]
#         done = False
#         while not done:
#             response = self.openai.chat.completions.create(model="gpt-4o-mini", messages=messages, tools=tools)
#             if response.choices[0].finish_reason == "tool_calls":
#                 message = response.choices[0].message
#                 tool_calls = message.tool_calls
#                 results = self.handle_tool_call(tool_calls)
#                 messages.append(message)
#                 messages.extend(results)
#             else:
#                 done = True
#         return response.choices[0].message.content
#
# if __name__ == "__main__":
#     me = Me()
#     gr.ChatInterface(me.chat, type="messages").launch()
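# ---- Version 2: Gemini via its OpenAI-compatible endpoint ----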
# from dotenv import load_dotenv
# from openai import OpenAI
# import json
# import os
# import requests
# from pypdf import PdfReader
# import gradio as gr
#
# load_dotenv(override=True)
#
# GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"
# google_api_key = os.getenv("GOOGLE_API_KEY")
#
# # Initialize Gemini client
# gemini = OpenAI(
#     base_url=GEMINI_BASE_URL,
#     api_key=google_api_key
# )
#
# def push(text):
#     requests.post(
#         "https://api.pushover.net/1/messages.json",
#         data={
#             "token": os.getenv("PUSHOVER_TOKEN"),
#             "user": os.getenv("PUSHOVER_USER"),
#             "message": text,
#         }
#     )
#
# def record_user_details(email, name="Name not provided", notes="not provided"):
#     push(f"Recording {name} with email {email} and notes {notes}")
#     return {"recorded": "ok"}
#
# def record_unknown_question(question):
#     push(f"Recording {question}")
#     return {"recorded": "ok"}
#
# record_user_details_json = {
#     "name": "record_user_details",
#     "description": "Use this tool to record that a user is interested in being in touch and provided an email address",
#     "parameters": {
#         "type": "object",
#         "properties": {
#             "email": {
#                 "type": "string",
#                 "description": "The email address of this user"
#             },
#             "name": {
#                 "type": "string",
#                 "description": "The user's name, if they provided it"
#             },
#             "notes": {
#                 "type": "string",
#                 "description": "Any additional information about the conversation that's worth recording to give context"
#             }
#         },
#         "required": ["email"],
#         "additionalProperties": False
#     }
# }
#
# record_unknown_question_json = {
#     "name": "record_unknown_question",
#     "description": "Always use this tool to record any question that couldn't be answered as you didn't know the answer",
#     "parameters": {
#         "type": "object",
#         "properties": {
#             "question": {
#                 "type": "string",
#                 "description": "The question that couldn't be answered"
#             },
#         },
#         "required": ["question"],
#         "additionalProperties": False
#     }
# }
#
# tools = [
#     {"type": "function", "function": record_user_details_json},
#     {"type": "function", "function": record_unknown_question_json}
# ]
#
# class Me:
#
#     def __init__(self):
#         self.openai = gemini  # REPLACED OpenAI WITH GEMINI
#         self.name = "AKASH M J"
#         reader = PdfReader("me/Profile.pdf")
#         self.linkedin = ""
#         for page in reader.pages:
#             text = page.extract_text()
#             if text:
#                 self.linkedin += text
#         with open("me/summary.txt", "r", encoding="utf-8") as f:
#             self.summary = f.read()
#
#     def handle_tool_call(self, tool_calls):
#         results = []
#         for tool_call in tool_calls:
#             tool_name = tool_call.function.name
#             arguments = json.loads(tool_call.function.arguments)
#             print(f"Tool called: {tool_name}", flush=True)
#             tool = globals().get(tool_name)
#             result = tool(**arguments) if tool else {}
#             results.append({
#                 "role": "tool",
#                 "content": json.dumps(result),
#                 "tool_call_id": tool_call.id
#             })
#         return results
#
#     def system_prompt(self):
#         system_prompt = (
#             f"You are acting as {self.name}. You are answering questions on {self.name}'s website, "
#             f"particularly questions related to {self.name}'s career, background, skills and experience. "
#             f"Your responsibility is to represent {self.name} for interactions on the website as faithfully as possible. "
#             f"You are given a summary of {self.name}'s background and LinkedIn profile which you can use to answer questions. "
#             f"Be professional and engaging, as if talking to a potential client or future employer who came across the website. "
#             f"If you don't know the answer to any question, use your record_unknown_question tool to record the question. "
#             f"If the user is engaging in discussion, try to steer them towards getting in touch via email."
#         )
#         system_prompt += f"\n\n## Summary:\n{self.summary}\n\n## LinkedIn Profile:\n{self.linkedin}\n\n"
#         system_prompt += f"With this context, please chat with the user, always staying in character as {self.name}."
#         return system_prompt
#
#     def chat(self, message, history):
#         messages = [
#             {"role": "system", "content": self.system_prompt()}
#         ] + history + [
#             {"role": "user", "content": message}
#         ]
#         done = False
#         while not done:
#             # ---- CHANGED TO USE GEMINI ----
#             response = self.openai.chat.completions.create(
#                 model="gemini-2.0-flash",
#                 messages=messages,
#                 tools=tools
#             )
#             # --------------------------------
#             if response.choices[0].finish_reason == "tool_calls":
#                 message = response.choices[0].message
#                 tool_calls = message.tool_calls
#                 results = self.handle_tool_call(tool_calls)
#                 messages.append(message)
#                 messages.extend(results)
#             else:
#                 done = True
#         return response.choices[0].message.content
#
# if __name__ == "__main__":
#     me = Me()
#     gr.ChatInterface(me.chat, type="messages").launch()
#     # gr.ChatInterface(me.chat).launch()
# Version 2 above was verified working end to end.
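# ---- Version 3: Gemini + FAQ database, router, and self-evaluation ----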
# # app.py
# from dotenv import load_dotenv
# from openai import OpenAI
# import json
# import os
# import requests
# from pypdf import PdfReader
# import gradio as gr
# import sqlite3
# import time
#
# load_dotenv(override=True)
#
# # --- CONFIG ---
# GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"
# google_api_key = os.getenv("GOOGLE_API_KEY")
#
# # Initialize Gemini client (the OpenAI client pointed at Gemini's OpenAI-compatible endpoint)
# gemini = OpenAI(base_url=GEMINI_BASE_URL, api_key=google_api_key)
#
# # --- Pushover helper ---
# def push(text):
#     token = os.getenv("PUSHOVER_TOKEN")
#     user = os.getenv("PUSHOVER_USER")
#     if not token or not user:
#         print("Pushover credentials not set. Skipping push.")
#         return
#     try:
#         requests.post(
#             "https://api.pushover.net/1/messages.json",
#             data={"token": token, "user": user, "message": text},
#             timeout=5
#         )
#     except Exception as e:
#         print("Pushover error:", e)
#
# # --- Tools (actual implementations) ---
# def record_user_details(email, name="Name not provided", notes="not provided"):
#     push(f"Recording contact: {name} <{email}> notes: {notes}")
#     return {"recorded": "ok", "email": email, "name": name}
#
# def record_unknown_question(question):
#     push(f"Unknown question recorded: {question}")
#     # Optionally write to a local file for audits
#     os.makedirs("me/logs", exist_ok=True)
#     with open("me/logs/unknown_questions.txt", "a", encoding="utf-8") as f:
#         f.write(question.strip() + "\n")
#     return {"recorded": "ok", "question": question}
#
# def search_faq(query):
#     db_path = os.path.join("me", "qa.db")
#     if not os.path.exists(db_path):
#         return {"answer": "FAQ database not found."}
#     conn = sqlite3.connect(db_path)
#     cur = conn.cursor()
#     cur.execute("SELECT answer FROM faq WHERE question LIKE ? LIMIT 1", (f"%{query}%",))
#     row = cur.fetchone()
#     conn.close()
#     return {"answer": row[0]} if row else {"answer": "not found"}
#
# # --- Tool JSON metadata (for function-calling style) ---
# record_user_details_json = {
#     "name": "record_user_details",
#     "description": "Record an interested user's email and optional name/notes.",
#     "parameters": {
#         "type": "object",
#         "properties": {
#             "email": {"type": "string"},
#             "name": {"type": "string"},
#             "notes": {"type": "string"}
#         },
#         "required": ["email"],
#         "additionalProperties": False
#     }
# }
#
# record_unknown_question_json = {
#     "name": "record_unknown_question",
#     "description": "Record any question the assistant could not answer.",
#     "parameters": {
#         "type": "object",
#         "properties": {
#             "question": {"type": "string"}
#         },
#         "required": ["question"],
#         "additionalProperties": False
#     }
# }
#
# search_faq_json = {
#     "name": "search_faq",
#     "description": "Search the FAQ database for a question.",
#     "parameters": {
#         "type": "object",
#         "properties": {
#             "query": {"type": "string"}
#         },
#         "required": ["query"],
#         "additionalProperties": False
#     }
# }
#
# tools = [
#     {"type": "function", "function": record_user_details_json},
#     {"type": "function", "function": record_unknown_question_json},
#     {"type": "function", "function": search_faq_json}
# ]
#
# # --- The assistant class ---
# class Me:
#     def __init__(self):
#         self.openai = gemini
#         self.name = "AKASH M J"
#
#         # Load profile PDF into self.linkedin
#         self.linkedin = ""
#         try:
#             reader = PdfReader(os.path.join("me", "Profile.pdf"))
#             for page in reader.pages:
#                 text = page.extract_text()
#                 if text:
#                     self.linkedin += text + "\n"
#         except Exception as e:
#             print("Could not read Profile.pdf:", e)
#
#         # Load summary
#         try:
#             with open(os.path.join("me", "summary.txt"), "r", encoding="utf-8") as f:
#                 self.summary = f.read()
#         except Exception as e:
#             print("Could not read summary.txt:", e)
#             self.summary = ""
#
#         # Load knowledge files (RAG-style simple concatenation)
#         self.knowledge = ""
#         kb_dir = os.path.join("me", "knowledge")
#         if os.path.exists(kb_dir):
#             for fn in sorted(os.listdir(kb_dir)):
#                 path = os.path.join(kb_dir, fn)
#                 try:
#                     with open(path, "r", encoding="utf-8") as f:
#                         self.knowledge += f"# {fn}\n" + f.read() + "\n\n"
#                 except Exception as e:
#                     print("Error reading", path, e)
#
#     def system_prompt(self):
#         system_prompt = (
#             f"You are acting as {self.name}. Answer questions about {self.name}'s background "
#             "and experience using the context provided. Be professional and concise. "
#             "If you don't know an answer, use the record_unknown_question tool."
#         )
#         system_prompt += f"\n\n## Summary:\n{self.summary}\n\n"
#         system_prompt += f"## LinkedIn profile (extracted):\n{self.linkedin}\n\n"
#         system_prompt += f"## Knowledge base:\n{self.knowledge}\n\n"
#         return system_prompt
#
#     def handle_tool_call(self, tool_calls):
#         results = []
#         for tool_call in tool_calls:
#             tool_name = tool_call.function.name
#             try:
#                 arguments = json.loads(tool_call.function.arguments)
#             except Exception:
#                 arguments = {}
#             print("Tool called:", tool_name, arguments, flush=True)
#             tool = globals().get(tool_name)
#             result = tool(**arguments) if tool else {}
#             results.append({
#                 "role": "tool",
#                 "content": json.dumps(result),
#                 "tool_call_id": tool_call.id
#             })
#         return results
#
#     # Simple router/orchestrator: route common queries to the FAQ or to the LLM
#     def route_question(self, question):
#         q = question.lower()
#         # keywords that map to FAQ
#         faq_keywords = ["project", "tech stack", "stack", "skill", "skills", "study", "education", "experience"]
#         if any(k in q for k in faq_keywords):
#             return "search_faq"
#         return None
#
#     def evaluate_answer(self, user_question, ai_answer):
#         # Simple evaluator: ask the LLM to judge the quality
#         eval_prompt = f"""
# You are an evaluator. Judge whether the assistant reply is clear, correct, and complete for the user question.
# Return exactly PASS or FAIL and a one-line reason.
#
# User question:
# {user_question}
#
# Assistant reply:
# {ai_answer}
# """
#         try:
#             ev = self.openai.chat.completions.create(
#                 model="gemini-2.0-flash",
#                 messages=[{"role": "system", "content": "You are an evaluator."},
#                           {"role": "user", "content": eval_prompt}]
#             )
#             text = ev.choices[0].message.content.strip()
#             # very simple parse
#             if text.upper().startswith("PASS"):
#                 return {"result": "PASS", "note": text}
#             else:
#                 return {"result": "FAIL", "note": text}
#         except Exception as e:
#             print("Evaluator failed:", e)
#             return {"result": "UNKNOWN", "note": str(e)}
#
#     def chat(self, message, history):
#         # build messages with system prompt + history + user
#         messages = [{"role": "system", "content": self.system_prompt()}] + history + [{"role": "user", "content": message}]
#
#         # 1) Router: check if the question should use the FAQ tool
#         tool_to_use = self.route_question(message)
#         if tool_to_use == "search_faq":
#             # call the tool directly and return the answer if it passes evaluation
#             tool_result = search_faq(message)
#             raw_answer = tool_result.get("answer", "I don't have that in my FAQ.")
#             eval_res = self.evaluate_answer(message, raw_answer)
#             if eval_res["result"] == "PASS":
#                 return raw_answer
#             # otherwise fall through to the LLM flow below
#
#         # 2) Normal LLM flow with tools support (function-calling style)
#         done = False
#         while not done:
#             response = self.openai.chat.completions.create(
#                 model="gemini-2.0-flash",
#                 messages=messages,
#                 tools=tools
#             )
#             finish = response.choices[0].finish_reason
#             if finish == "tool_calls":
#                 # the LLM asked to call a tool
#                 message_obj = response.choices[0].message
#                 tool_calls = getattr(message_obj, "tool_calls", [])
#                 results = self.handle_tool_call(tool_calls)
#                 messages.append(message_obj)
#                 messages.extend(results)
#                 # loop again so the LLM can consume tool outputs
#             else:
#                 done = True
#
#         ai_answer = response.choices[0].message.content
#
#         # 3) Evaluate the answer; if FAIL, ask the LLM to improve
#         eval_res = self.evaluate_answer(message, ai_answer)
#         if eval_res["result"] == "FAIL":
#             # ask the model to improve using the critique
#             improve_prompt = f"User question:\n{message}\n\nAssistant previous reply:\n{ai_answer}\n\nEvaluator note:\n{eval_res['note']}\n\nPlease produce an improved concise answer."
#             messages.append({"role": "user", "content": improve_prompt})
#             improved_resp = self.openai.chat.completions.create(model="gemini-2.0-flash", messages=messages)
#             ai_answer = improved_resp.choices[0].message.content
#
#         return ai_answer
#
# # --- Launch ---
# if __name__ == "__main__":
#     me = Me()
#     gr.ChatInterface(me.chat, type="messages").launch()
#     # gr.ChatInterface(me.chat).launch()
# ---- Version 4: OpenRouter with google/gemini-2.0-flash-exp:free ----
# # app.py
# from dotenv import load_dotenv
# from openai import OpenAI
# import json
# import os
# import requests
# from pypdf import PdfReader
# import gradio as gr
# import sqlite3
# import time
#
# load_dotenv(override=True)
#
# # --- CONFIG (OpenRouter instead of Google Gemini) ---
# OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1"
# openrouter_api_key = os.getenv("OPENROUTER_API_KEY")
#
# # Initialize OpenRouter client
# gemini = OpenAI(
#     base_url=OPENROUTER_BASE_URL,
#     api_key=openrouter_api_key,
#     default_headers={
#         "HTTP-Referer": "http://localhost",  # optional attribution header recommended by OpenRouter
#         "X-Title": "My-Gemini-App"
#     }
# )
#
# # --- Pushover helper ---
# def push(text):
#     token = os.getenv("PUSHOVER_TOKEN")
#     user = os.getenv("PUSHOVER_USER")
#     if not token or not user:
#         print("Pushover credentials not set. Skipping push.")
#         return
#     try:
#         requests.post(
#             "https://api.pushover.net/1/messages.json",
#             data={"token": token, "user": user, "message": text},
#             timeout=5
#         )
#     except Exception as e:
#         print("Pushover error:", e)
#
# # --- Tools ---
# def record_user_details(email, name="Name not provided", notes="not provided"):
#     push(f"Recording contact: {name} <{email}> notes: {notes}")
#     return {"recorded": "ok", "email": email, "name": name}
#
# def record_unknown_question(question):
#     push(f"Unknown question recorded: {question}")
#     os.makedirs("me/logs", exist_ok=True)
#     with open("me/logs/unknown_questions.txt", "a", encoding="utf-8") as f:
#         f.write(question.strip() + "\n")
#     return {"recorded": "ok", "question": question}
#
# def search_faq(query):
#     db_path = os.path.join("me", "qa.db")
#     if not os.path.exists(db_path):
#         return {"answer": "FAQ database not found."}
#     conn = sqlite3.connect(db_path)
#     cur = conn.cursor()
#     cur.execute("SELECT answer FROM faq WHERE question LIKE ? LIMIT 1", (f"%{query}%",))
#     row = cur.fetchone()
#     conn.close()
#     return {"answer": row[0]} if row else {"answer": "not found"}
#
# # --- Tool JSON metadata ---
# record_user_details_json = {
#     "name": "record_user_details",
#     "description": "Record an interested user's email and optional name/notes.",
#     "parameters": {
#         "type": "object",
#         "properties": {
#             "email": {"type": "string"},
#             "name": {"type": "string"},
#             "notes": {"type": "string"}
#         },
#         "required": ["email"],
#         "additionalProperties": False
#     }
# }
#
# record_unknown_question_json = {
#     "name": "record_unknown_question",
#     "description": "Record any question the assistant could not answer.",
#     "parameters": {
#         "type": "object",
#         "properties": {
#             "question": {"type": "string"}
#         },
#         "required": ["question"],
#         "additionalProperties": False
#     }
# }
#
# search_faq_json = {
#     "name": "search_faq",
#     "description": "Search the FAQ database for a question.",
#     "parameters": {
#         "type": "object",
#         "properties": {
#             "query": {"type": "string"}
#         },
#         "required": ["query"],
#         "additionalProperties": False
#     }
# }
#
# tools = [
#     {"type": "function", "function": record_user_details_json},
#     {"type": "function", "function": record_unknown_question_json},
#     {"type": "function", "function": search_faq_json}
# ]
#
# # --- The assistant class ---
# class Me:
#     def __init__(self):
#         self.openai = gemini
#         self.name = "AKASH M J"
#
#         self.linkedin = ""
#         try:
#             reader = PdfReader(os.path.join("me", "Profile.pdf"))
#             for page in reader.pages:
#                 text = page.extract_text()
#                 if text:
#                     self.linkedin += text + "\n"
#         except Exception as e:
#             print("Could not read Profile.pdf:", e)
#
#         try:
#             with open(os.path.join("me", "summary.txt"), "r", encoding="utf-8") as f:
#                 self.summary = f.read()
#         except Exception:
#             self.summary = ""
#
#         self.knowledge = ""
#         kb_dir = os.path.join("me", "knowledge")
#         if os.path.exists(kb_dir):
#             for fn in sorted(os.listdir(kb_dir)):
#                 try:
#                     with open(os.path.join(kb_dir, fn), "r", encoding="utf-8") as f:
#                         self.knowledge += f"# {fn}\n" + f.read() + "\n\n"
#                 except Exception:
#                     pass
#
#     def system_prompt(self):
#         system_prompt = (
#             f"You are acting as {self.name}. Answer questions about {self.name}'s background."
#         )
#         system_prompt += f"\n\n## Summary:\n{self.summary}\n\n"
#         system_prompt += f"## LinkedIn profile:\n{self.linkedin}\n\n"
#         system_prompt += f"## Knowledge base:\n{self.knowledge}\n\n"
#         return system_prompt
#
#     def handle_tool_call(self, tool_calls):
#         results = []
#         for tool_call in tool_calls:
#             tool_name = tool_call.function.name
#             arguments = json.loads(tool_call.function.arguments)
#             tool = globals().get(tool_name)
#             result = tool(**arguments) if tool else {}
#             results.append({
#                 "role": "tool",
#                 "content": json.dumps(result),
#                 "tool_call_id": tool_call.id
#             })
#         return results
#
#     def route_question(self, q):
#         q = q.lower()
#         faq_keywords = ["project", "skills", "experience", "study", "education"]
#         if any(k in q for k in faq_keywords):
#             return "search_faq"
#         return None
#
#     def evaluate_answer(self, user_question, ai_answer):
#         eval_prompt = f"""
# Evaluate whether the answer is good. Respond with PASS or FAIL.
#
# User question:
# {user_question}
#
# Assistant reply:
# {ai_answer}
# """
#         try:
#             ev = self.openai.chat.completions.create(
#                 model="google/gemini-2.0-flash-exp:free",
#                 messages=[
#                     {"role": "system", "content": "You are an evaluator."},
#                     {"role": "user", "content": eval_prompt}
#                 ]
#             )
#             text = ev.choices[0].message.content.strip()
#             if text.upper().startswith("PASS"):
#                 return {"result": "PASS", "note": text}
#             return {"result": "FAIL", "note": text}
#         except Exception as e:
#             return {"result": "UNKNOWN", "note": str(e)}
#
#     def chat(self, message, history):
#         messages = [{"role": "system", "content": self.system_prompt()}] + history + [{"role": "user", "content": message}]
#
#         tool_to_use = self.route_question(message)
#         if tool_to_use == "search_faq":
#             tool_result = search_faq(message)
#             ans = tool_result.get("answer", "not found")
#             if self.evaluate_answer(message, ans)["result"] == "PASS":
#                 return ans
#
#         done = False
#         while not done:
#             response = self.openai.chat.completions.create(
#                 model="google/gemini-2.0-flash-exp:free",
#                 messages=messages,
#                 tools=tools
#             )
#             finish = response.choices[0].finish_reason
#             if finish == "tool_calls":
#                 tool_calls = response.choices[0].message.tool_calls
#                 results = self.handle_tool_call(tool_calls)
#                 messages.append(response.choices[0].message)
#                 messages.extend(results)
#             else:
#                 done = True
#
#         ai_answer = response.choices[0].message.content
#
#         eval_res = self.evaluate_answer(message, ai_answer)
#         if eval_res["result"] == "FAIL":
#             improve_prompt = f"Improve this answer:\n{ai_answer}\n\nCritique:\n{eval_res['note']}"
#             messages.append({"role": "user", "content": improve_prompt})
#             improved = self.openai.chat.completions.create(
#                 model="google/gemini-2.0-flash-exp:free",
#                 messages=messages
#             )
#             ai_answer = improved.choices[0].message.content
#
#         return ai_answer
#
# # --- Launch ---
# if __name__ == "__main__":
#     me = Me()
#     gr.ChatInterface(me.chat, type="messages").launch()
# ---- Version 5 (active): OpenRouter with openai/gpt-oss-120b:free ----
from dotenv import load_dotenv
from openai import OpenAI
import json
import os
import requests
from pypdf import PdfReader
import gradio as gr
import sqlite3

load_dotenv(override=True)
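# A minimal .env sketch for the variables this script reads (values below are
# placeholders, not real credentials):
#
#   OPENROUTER_API_KEY=sk-or-...
#   PUSHOVER_TOKEN=your-pushover-app-token
#   PUSHOVER_USER=your-pushover-user-key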
# -------------------------------------------------------------------
# OPENROUTER CONFIG
# -------------------------------------------------------------------
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")

openrouter = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=OPENROUTER_API_KEY
)

# Your chosen free model on OpenRouter
MODEL_NAME = "openai/gpt-oss-120b:free"
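# Optional sanity check -- a minimal sketch, assuming OpenRouter mirrors the
# OpenAI /models endpoint (it does at the time of writing), so the same client
# can list the model ids this key may call. Not invoked anywhere; run it by
# hand while debugging model names.
def list_openrouter_models(limit=10):
    """Print the first `limit` model ids visible to this API key."""
    for i, model in enumerate(openrouter.models.list().data):
        if i >= limit:
            break
        print(model.id)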
# -------------------------------------------------------------------
# Pushover helper
# -------------------------------------------------------------------
def push(text):
    token = os.getenv("PUSHOVER_TOKEN")
    user = os.getenv("PUSHOVER_USER")
    if not token or not user:
        print("Pushover credentials not set. Skipping push.")
        return
    try:
        requests.post(
            "https://api.pushover.net/1/messages.json",
            data={"token": token, "user": user, "message": text},
            timeout=5
        )
    except Exception as e:
        print("Pushover error:", e)
# -------------------------------------------------------------------
# TOOLS
# -------------------------------------------------------------------
def record_user_details(email, name="Name not provided", notes="not provided"):
    push(f"Recording contact: {name} <{email}> notes: {notes}")
    return {"recorded": "ok", "email": email, "name": name}

def record_unknown_question(question):
    push(f"Unknown question recorded: {question}")
    os.makedirs("me/logs", exist_ok=True)
    with open("me/logs/unknown_questions.txt", "a", encoding="utf-8") as f:
        f.write(question.strip() + "\n")
    return {"recorded": "ok", "question": question}

def search_faq(query):
    db_path = os.path.join("me", "qa.db")
    if not os.path.exists(db_path):
        return {"answer": "FAQ database not found."}
    conn = sqlite3.connect(db_path)
    cur = conn.cursor()
    cur.execute("SELECT answer FROM faq WHERE question LIKE ? LIMIT 1", (f"%{query}%",))
    row = cur.fetchone()
    conn.close()
    return {"answer": row[0]} if row else {"answer": "not found"}
# Tool JSON
record_user_details_json = {
    "name": "record_user_details",
    "description": "Record an interested user's email and optional name/notes.",
    "parameters": {
        "type": "object",
        "properties": {
            "email": {"type": "string"},
            "name": {"type": "string"},
            "notes": {"type": "string"}
        },
        "required": ["email"],
        "additionalProperties": False
    }
}

record_unknown_question_json = {
    "name": "record_unknown_question",
    "description": "Record any question the assistant could not answer.",
    "parameters": {
        "type": "object",
        "properties": {
            "question": {"type": "string"}
        },
        "required": ["question"],
        "additionalProperties": False
    }
}

search_faq_json = {
    "name": "search_faq",
    "description": "Search the FAQ database for a question.",
    "parameters": {
        "type": "object",
        "properties": {
            "query": {"type": "string"}
        },
        "required": ["query"],
        "additionalProperties": False
    }
}

tools = [
    {"type": "function", "function": record_user_details_json},
    {"type": "function", "function": record_unknown_question_json},
    {"type": "function", "function": search_faq_json}
]
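# For reference, when the model decides to call a tool, the assistant message
# carries a `tool_calls` list shaped roughly like this (field names follow the
# OpenAI chat-completions schema; the id and values are made up):
#
#   tool_calls[0].id                 -> "call_abc123"
#   tool_calls[0].function.name      -> "record_user_details"
#   tool_calls[0].function.arguments -> '{"email": "jane@example.com"}'
#
# handle_tool_call() below replies with one {"role": "tool", ...} message per
# call, matched back to the request via tool_call_id.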
# -------------------------------------------------------------------
# MAIN ASSISTANT CLASS
# -------------------------------------------------------------------
class Me:
    def __init__(self):
        self.openai = openrouter  # <--- using OpenRouter
        self.name = "AKASH M J"

        # Load PDF profile
        self.linkedin = ""
        try:
            reader = PdfReader(os.path.join("me", "Profile.pdf"))
            for page in reader.pages:
                text = page.extract_text()
                if text:
                    self.linkedin += text + "\n"
        except Exception as e:
            print("Could not read Profile.pdf:", e)

        # Load summary
        try:
            with open(os.path.join("me", "summary.txt"), "r", encoding="utf-8") as f:
                self.summary = f.read()
        except Exception as e:
            print("Could not read summary.txt:", e)
            self.summary = ""

        # Load knowledge files
        self.knowledge = ""
        kb_dir = os.path.join("me", "knowledge")
        if os.path.exists(kb_dir):
            for fn in sorted(os.listdir(kb_dir)):
                path = os.path.join(kb_dir, fn)
                try:
                    with open(path, "r", encoding="utf-8") as f:
                        self.knowledge += f"# {fn}\n" + f.read() + "\n\n"
                except Exception as e:
                    print("Error reading", path, e)

    def system_prompt(self):
        system_prompt = (
            f"You are acting as {self.name}. Answer questions about {self.name}'s "
            "background and experience using the context provided. Be professional. "
            "If unsure, use record_unknown_question."
        )
        system_prompt += f"\n\n## Summary:\n{self.summary}\n\n"
        system_prompt += f"## LinkedIn:\n{self.linkedin}\n\n"
        system_prompt += f"## Knowledge:\n{self.knowledge}\n\n"
        return system_prompt

    def handle_tool_call(self, tool_calls):
        results = []
        for tool_call in tool_calls:
            tool_name = tool_call.function.name
            try:
                arguments = json.loads(tool_call.function.arguments)
            except Exception:
                arguments = {}
            print("Tool called:", tool_name, arguments, flush=True)
            tool = globals().get(tool_name)
            result = tool(**arguments) if tool else {}
            results.append({
                "role": "tool",
                "content": json.dumps(result),
                "tool_call_id": tool_call.id
            })
        return results

    # Router for FAQ
    def route_question(self, question):
        q = question.lower()
        faq_keywords = ["project", "tech stack", "skill", "education", "experience"]
        if any(k in q for k in faq_keywords):
            return "search_faq"
        return None
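    # e.g. route_question("What projects have you worked on?") -> "search_faq"
    #      route_question("Where are you based?")              -> None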
    # Evaluator
    def evaluate_answer(self, user_question, ai_answer):
        eval_prompt = f"""
Evaluate the answer for clarity and correctness.
Return PASS or FAIL and a one-line reason.

Question: {user_question}

Answer: {ai_answer}
"""
        try:
            ev = self.openai.chat.completions.create(
                model=MODEL_NAME,
                messages=[
                    {"role": "system", "content": "You are an evaluator."},
                    {"role": "user", "content": eval_prompt}
                ]
            )
            text = ev.choices[0].message.content.strip()
            if text.upper().startswith("PASS"):
                return {"result": "PASS", "note": text}
            else:
                return {"result": "FAIL", "note": text}
        except Exception as e:
            return {"result": "UNKNOWN", "note": str(e)}
    # Chat
    def chat(self, message, history):
        messages = [{"role": "system", "content": self.system_prompt()}] + history + [{"role": "user", "content": message}]

        # Router: answer FAQ-style questions straight from the database
        tool_to_use = self.route_question(message)
        if tool_to_use == "search_faq":
            tool_result = search_faq(message)
            raw_answer = tool_result.get("answer", "Not found.")
            ev = self.evaluate_answer(message, raw_answer)
            if ev["result"] == "PASS":
                return raw_answer
            # otherwise fall through to the LLM flow below

        # LLM with tools
        done = False
        while not done:
            response = self.openai.chat.completions.create(
                model=MODEL_NAME,
                messages=messages,
                tools=tools
            )
            finish = response.choices[0].finish_reason
            if finish == "tool_calls":
                msg = response.choices[0].message
                tool_calls = getattr(msg, "tool_calls", [])
                results = self.handle_tool_call(tool_calls)
                messages.append(msg)
                messages.extend(results)
            else:
                done = True

        ai_answer = response.choices[0].message.content

        # Evaluate; on FAIL, ask the model to improve its own answer once
        eval_res = self.evaluate_answer(message, ai_answer)
        if eval_res["result"] == "FAIL":
            improve_prompt = (
                f"User question:\n{message}\n\n"
                f"Previous answer:\n{ai_answer}\n\n"
                f"Evaluator note:\n{eval_res['note']}\n\n"
                "Please provide an improved answer."
            )
            messages.append({"role": "user", "content": improve_prompt})
            improved_resp = self.openai.chat.completions.create(
                model=MODEL_NAME, messages=messages
            )
            ai_answer = improved_resp.choices[0].message.content

        return ai_answer

# -------------------------------------------------------------------
# GRADIO LAUNCH
# -------------------------------------------------------------------
if __name__ == "__main__":
    me = Me()
    # type="messages" makes Gradio pass history as a list of {"role", "content"}
    # dicts, which chat() concatenates directly into the model's message list;
    # the default tuple-style history would break that concatenation.
    gr.ChatInterface(me.chat, type="messages").launch()
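# For deployment (e.g. a Hugging Face Space), a requirements.txt matching the
# imports above would look roughly like this (json, os and sqlite3 are stdlib):
#
#   python-dotenv
#   openai
#   requests
#   pypdf
#   gradio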