Spaces:
Running
Running
| from dotenv import load_dotenv | |
| from openai import OpenAI | |
| from pypdf import PdfReader | |
| import gradio as gr | |
| import requests | |
| import json | |
| import os | |
| from reflection_memory import init_db, save_reflection, fetch_recent | |
# ==================================================
# Environment
# ==================================================
load_dotenv(override=True)  # load .env values, overriding any pre-set shell vars

# Pushover credentials; push() becomes a no-op when either is missing.
PUSHOVER_TOKEN = os.getenv("PUSHOVER_TOKEN")
PUSHOVER_USER = os.getenv("PUSHOVER_USER")

MODEL_NAME = "gpt-4.1"  # OpenAI chat model used for every completion
MAX_HISTORY_MESSAGES = 6  # trailing history messages kept per request
MAX_TURNS = 5  # cap on model/tool round-trips within one chat() call
# ==================================================
# Conversation awareness helpers
# ==================================================
UNCERTAINTY_MARKERS = (
    "not sure",
    "confused",
    "stuck",
    "overwhelmed",
    "i don't know",
    "lost",
    "anxious",
)


def should_slow_down(text: str) -> bool:
    """Return True when *text* contains any uncertainty marker (case-insensitive)."""
    lowered = text.lower()
    for marker in UNCERTAINTY_MARKERS:
        if marker in lowered:
            return True
    return False
# ==================================================
# Notifications (Pushover)
# ==================================================
def push(message: str) -> None:
    """Send a best-effort Pushover notification.

    Silently does nothing when credentials are absent; network failures are
    logged rather than raised so notifications never break the chat flow.
    """
    if PUSHOVER_TOKEN and PUSHOVER_USER:
        payload = {
            "token": PUSHOVER_TOKEN,
            "user": PUSHOVER_USER,
            "message": message,
        }
        try:
            requests.post(
                "https://api.pushover.net/1/messages.json",
                data=payload,
                timeout=5,
            )
        except Exception as e:
            print("Pushover failed:", e)
# ==================================================
# Tool Implementations
# ==================================================
def record_user_details(
    email: str,
    name: str = "Name not provided",
    notes: str = "not provided",
):
    """Notify about a new contact lead and acknowledge the tool call."""
    notification = f"New contact: {name} | {email} | Notes: {notes}"
    push(notification)
    return {"recorded": "ok"}
def record_unknown_question(question: str):
    """Log a question the assistant could not answer, for later follow-up."""
    alert = f"Unknown question: {question}"
    push(alert)
    return {"recorded": "ok"}
# ==================================================
# Tool Schemas
# ==================================================
# JSON-schema tool definitions handed to the OpenAI chat API. Each "name"
# maps to a same-named module-level function, resolved via globals() in
# Me.handle_tool_calls.
TOOLS = [
    {
        "type": "function",
        "function": {
            "name": "record_user_details",
            "description": "Record that a user is interested in being in touch",
            "parameters": {
                "type": "object",
                "properties": {
                    # Only email is required; name/notes fall back to defaults.
                    "email": {"type": "string"},
                    "name": {"type": "string"},
                    "notes": {"type": "string"},
                },
                "required": ["email"],
                "additionalProperties": False,
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "record_unknown_question",
            "description": "Record any question that could not be answered",
            "parameters": {
                "type": "object",
                "properties": {
                    "question": {"type": "string"},
                },
                "required": ["question"],
                "additionalProperties": False,
            },
        },
    },
]
# ==================================================
# Chatbot
# ==================================================
class Me:
    """Persona chatbot that answers as Urvashi Patel.

    Loads profile documents at startup, injects them into the system prompt,
    supports OpenAI tool calls, and persists lightweight reflection memory
    between conversations via reflection_memory.
    """

    def __init__(self):
        self.client = OpenAI()
        self.name = "Urvashi Patel"
        init_db()  # ensure the reflection-memory schema exists before first use
        self.linkedin = self._load_pdf("me/linkedin.pdf")
        self.summary = self._load_text("me/summary.txt")
        self.about_urva = self._load_text("me/about_urva.md")

    # -----------------------------
    # Loaders
    # -----------------------------
    def _load_text(self, path: str) -> str:
        """Read a UTF-8 text file; return "" (and log) when it cannot be read."""
        try:
            with open(path, "r", encoding="utf-8") as f:
                return f.read()
        except Exception as e:
            print(f"Failed to load {path}:", e)
            return ""

    def _load_pdf(self, path: str) -> str:
        """Concatenate extractable text from every PDF page; "" on failure."""
        text = ""
        try:
            reader = PdfReader(path)
            for page in reader.pages:
                page_text = page.extract_text()
                # extract_text() can return None for image-only pages
                if page_text:
                    text += page_text
        except Exception as e:
            print(f"Failed to load {path}:", e)
        return text

    # -----------------------------
    # Prompt
    # -----------------------------
    def system_prompt(self) -> str:
        """Build the persona system prompt with the loaded profile documents."""
        return f"""
You are not a general AI assistant. You only answer as Urvashi Patel.
You are acting as {self.name} on her personal website.
Answer questions about career, background, skills, experience, and interests.
Be thoughtful, grounded, and professional.
Avoid generic advice. Prefer clarity and context.
If asked for medical, legal, or financial advice, politely decline.
When the user sounds uncertain:
- Ask ONE reflective question
- Do not rush to solutions
- Offer options, not instructions
## Personal Context
{self.about_urva}
## Summary
{self.summary}
{self.linkedin}
Stay in character at all times.
"""

    # -----------------------------
    # Tool handling
    # -----------------------------
    def handle_tool_calls(self, tool_calls):
        """Execute each requested tool and return the tool-role result messages.

        Tool names are resolved against module globals; an unknown tool yields
        an empty result rather than raising, so the model loop can continue.
        """
        results = []
        for call in tool_calls:
            tool_name = call.function.name
            arguments = json.loads(call.function.arguments)
            tool_fn = globals().get(tool_name)
            result = tool_fn(**arguments) if tool_fn else {}
            results.append(
                {
                    "role": "tool",
                    "content": json.dumps(result),
                    "tool_call_id": call.id,
                }
            )
        return results

    # -----------------------------
    # Chat loop
    # -----------------------------
    def chat(self, message, history):
        """Gradio chat handler: run the model/tool loop and return the reply.

        history is the Gradio "messages"-format list; only the trailing
        MAX_HISTORY_MESSAGES entries are forwarded to keep requests small.
        """
        trimmed_history = history[-MAX_HISTORY_MESSAGES:]
        # Fetch light reflection memory for conversational continuity
        recent_reflections = fetch_recent()
        reflection_context = ""
        if recent_reflections:
            reflection_context = "\n".join(
                f"- {r['theme']}: {r['summary']}"
                for r in recent_reflections
            )
        messages = [
            {"role": "system", "content": self.system_prompt()}
        ]
        if reflection_context:
            messages.append(
                {
                    "role": "system",
                    "content": f"Recent reflections for continuity:\n{reflection_context}",
                }
            )
        messages.extend(trimmed_history)
        messages.append({"role": "user", "content": message})
        # Bounded loop: each iteration is either a tool round-trip or the
        # final answer; MAX_TURNS prevents a runaway tool-call cycle.
        for _ in range(MAX_TURNS):
            response = self.client.chat.completions.create(
                model=MODEL_NAME,
                messages=messages,
                tools=TOOLS,
            )
            choice = response.choices[0]
            if choice.finish_reason == "tool_calls":
                messages.append(choice.message)
                messages.extend(self.handle_tool_calls(choice.message.tool_calls))
            else:
                # content may be None on some completions; normalize before
                # slicing so save_reflection never receives None[:300].
                final_answer = choice.message.content or ""
                theme = "direction" if should_slow_down(message) else "general"
                save_reflection(
                    theme=theme,
                    user_question=message,
                    assistant_summary=final_answer[:300],
                )
                return final_answer
        return "Thanks for your question — feel free to rephrase or reach out directly."
# ==================================================
# UI
# ==================================================
if __name__ == "__main__":
    # Construct at startup: loads profile documents and initializes memory DB.
    me = Me()
    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        gr.Markdown(
            """
### About Urvashi Patel — Integration Developer
Ask about experience, skills, projects, and approach to integration and automation.
"""
        )
        gr.ChatInterface(
            me.chat,
            type="messages",  # history delivered as role/content dict list
            submit_btn="Reflect",
        )
    demo.launch()