# Hugging Face Spaces page header captured during extraction ("Spaces: Sleeping");
# not part of the application source.
import contextlib
import io
import re  # Regular expressions, used to strip markdown code fences
import sys
import traceback

import gradio as gr
from huggingface_hub import InferenceClient
# Initialize the AI model
# Hosted instruction-tuned model used for every inference call in this app.
model_name = "Qwen/Qwen2.5-72B-Instruct"
# Shared serverless Inference API client — no local weights are downloaded.
client = InferenceClient(model_name)
def llm_inference(messages):
    """Run one non-streaming chat completion and return its text.

    Args:
        messages: OpenAI-style list of ``{"role": ..., "content": ...}`` dicts.

    Returns:
        The concatenated content of every returned choice (normally one).
    """
    completion = client.chat.completions.create(
        messages=messages,
        stream=False,
        temperature=0.7,
        top_p=0.1,
        max_tokens=412,
        stop=["<|endoftext|>"],
    )
    # Join all choices' contents; avoids repeated string concatenation.
    return "".join(choice['message']['content'] for choice in completion.choices)
def execute_code(code):
    """Execute a string of Python code and capture everything it prints.

    Args:
        code: Python source to run. It is executed with a fresh, empty
            globals dict so separate runs do not leak state into each other.

    Returns:
        The captured stdout text. On failure, any output produced before
        the error is kept, followed by the error message and traceback
        (the original implementation silently discarded partial output).
    """
    # SECURITY: exec() runs arbitrary code with this process's privileges.
    # That is the stated purpose of the "Interpreter" tab, but do not expose
    # this to untrusted multi-tenant input without real sandboxing.
    buffer = io.StringIO()
    try:
        # redirect_stdout restores sys.stdout even if exec() raises,
        # replacing the manual save/restore + finally dance.
        with contextlib.redirect_stdout(buffer):
            exec(code, {})
    except Exception as e:
        # Preserve whatever was printed before the failure, then append
        # the diagnosis so the user sees both.
        return buffer.getvalue() + f"Error: {e}\n{traceback.format_exc()}"
    return buffer.getvalue()
def is_math_task(user_input):
    """Heuristically decide whether *user_input* asks for a math computation.

    Word-like terms (keywords and function names such as 'sin', 'log') are
    matched with word boundaries, so they no longer fire inside ordinary
    words ('sin' in "using", 'tan' in "important", 'exp' in "explain").
    Operator symbols are still detected as plain substrings.

    Args:
        user_input: Raw text typed by the user.

    Returns:
        True if the text looks like a math task, else False.
    """
    math_keywords = [
        'calculate', 'compute', 'solve', 'integrate', 'differentiate',
        'derivative', 'integral', 'factorial', 'sum', 'product',
        'average', 'mean', 'median', 'mode', 'variance', 'standard deviation',
        'limit', 'matrix', 'determinant', 'equation', 'expression',
        # Math function names are words too; match them as whole words.
        'sqrt', 'sin', 'cos', 'tan', 'log', 'exp',
    ]
    user_input_lower = user_input.lower()
    # \b anchors prevent fragment matches inside unrelated words.
    if any(re.search(r'\b' + re.escape(keyword) + r'\b', user_input_lower)
           for keyword in math_keywords):
        return True
    # '**' is implied by '*', so it is not listed separately.
    return any(op in user_input for op in ['+', '-', '*', '/', '^'])
def chat(user_input, history):
    """Handle one chat turn.

    Math-looking prompts get a two-step treatment (explain the approach,
    then generate Python code and execute it); everything else is answered
    as plain chat.

    Args:
        user_input: The user's message.
        history: Gradio chat history as a list of (user, assistant) tuples;
            mutated in place, as Gradio expects.

    Returns:
        The updated history twice (the UI wires it to two outputs).
    """
    if is_math_task(user_input):
        assistant_response = _answer_math_task(user_input)
    else:
        assistant_response = _answer_general_chat(user_input)
    history.append((user_input, assistant_response))
    return history, history


def _answer_math_task(user_input):
    """Run the explain -> generate code -> execute pipeline for a math task."""
    # Step 1: ask for an approach description without the final answer.
    explanation = llm_inference([
        {
            "role": "system",
            "content": "Provide a very small explanation on how to approach the following mathematical task without calculating the answer."
        },
        {
            "role": "user",
            "content": f"Provide a short explanation on how to solve the following mathematical problem: {user_input}"
        },
    ])
    # Step 2: turn the explanation into runnable Python.
    code_prompt = f"Based on the following explanation, write a Python program to solve the mathematical task using Python. Ensure that the program includes a print statement to output the answer. Write only code. Writing any comments or anything else is prohibited. \n\nExplanation: {explanation}\n\nTask: {user_input}"
    generated_code = llm_inference([
        {
            "role": "system",
            "content": "You are a Python developer. Write Python code based on the provided explanation and task."
        },
        {
            "role": "user",
            "content": code_prompt
        },
    ])
    cleaned_code = _strip_code_fences(generated_code)
    execution_result = execute_code(cleaned_code)
    # Assemble a markdown answer showing all three stages.
    return (
        f"**Explanation:**\n{explanation}\n\n"
        f"**Generated Python Code:**\n```python\n{cleaned_code}\n```\n\n"
        f"**Execution Result:**\n```\n{execution_result}\n```"
    )


def _strip_code_fences(text):
    """Remove markdown code fences (``` or ```python) from LLM output."""
    # One pass suffices: the optional 'python' tag makes this pattern match
    # every fence, so the original second ``` substitution was dead code.
    return re.sub(r"```(?:python)?\n?", "", text).strip()


def _answer_general_chat(user_input):
    """Answer a non-math message with a single plain chat completion."""
    return llm_inference([
        {
            "role": "system",
            "content": "You are a helpful assistant."
        },
        {
            "role": "user",
            "content": user_input
        },
    ])
# --- Gradio UI -----------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# 🐍 Python Helper Chatbot")
    with gr.Tab("Chat"):
        # Main chat surface: submitting the textbox routes through chat(),
        # which returns the updated history for the Chatbot component.
        chatbot = gr.Chatbot()
        msg = gr.Textbox(placeholder="Type your message here...", label="Your Message")
        msg.submit(chat, inputs=[msg, chatbot], outputs=[chatbot, chatbot])
    with gr.Tab("Interpreter"):
        # Direct code-execution tab backed by execute_code().
        gr.Markdown("### 🖥️ Test Your Code")
        code_input = gr.Code(language="python", label="Python Code Input")
        run_button = gr.Button("Run Code")
        code_output = gr.Textbox(label="Output", lines=10)
        run_button.click(execute_code, inputs=code_input, outputs=code_output)
    with gr.Tab("Logs"):
        # NOTE(review): log_output is never written to by any callback in
        # this file — this tab appears to be a placeholder; confirm intent.
        gr.Markdown("### 📜 Logs")
        log_output = gr.Textbox(label="Logs", lines=10, interactive=False)
# Launch the Gradio app
demo.launch()