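"""Gradio demo comparing three LLM reasoning modes on openai/gpt-oss-20b:
Think-Only (chain-of-thought), Act-Only (tool calls without explicit reasoning),
and ReAct (interleaved reasoning and tool use), backed by a small tool registry."""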
import ast
import operator as op
import re
from typing import Generator

import gradio as gr
import requests
import torch
import wikipedia
from transformers import AutoTokenizer, AutoModelForCausalLM

class Tool:
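    """A named tool: wraps a Python callable with the name and description shown in prompts."""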
def __init__(self, name: str, description: str, func):
self.name = name
self.description = description
self.func = func
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
def duckduckgo_search(query: str) -> str:
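    """Query the DuckDuckGo Instant Answer API; return the abstract or up to three related topics."""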
try:
url = "https://api.duckduckgo.com/"
params = {'q': query, 'format': 'json', 'no_html': 1, 'skip_disambig': 1}
response = requests.get(url, params=params, timeout=10)
data = response.json()
if data.get('Abstract'):
return f"Search result: {data['Abstract']}"
elif data.get('RelatedTopics') and len(data['RelatedTopics']) > 0:
results = [topic['Text'] for topic in data['RelatedTopics'][:3] if 'Text' in topic]
return f"Search results: {' | '.join(results)}" if results else "No results found."
return "No results found."
except Exception as e:
return f"Search error: {str(e)}"
def wikipedia_search(query: str) -> str:
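    """Return a three-sentence English Wikipedia summary, or list options on disambiguation."""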
try:
wikipedia.set_lang("en")
summary = wikipedia.summary(query, sentences=3, auto_suggest=True)
return f"Wikipedia: {summary}"
except wikipedia.exceptions.DisambiguationError as e:
return f"Wikipedia: Multiple results found. Options: {', '.join(e.options[:5])}"
except wikipedia.exceptions.PageError:
return f"Wikipedia: No page found for '{query}'."
except Exception as e:
return f"Wikipedia error: {str(e)}"
def get_weather(location: str) -> str:
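    """Fetch current conditions for a location from the wttr.in JSON endpoint."""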
try:
url = f"https://wttr.in/{location}?format=j1"
response = requests.get(url, timeout=10)
data = response.json()
current = data['current_condition'][0]
return f"Weather in {location}: {current['weatherDesc'][0]['value']}, {current['temp_C']}Β°C, Humidity: {current['humidity']}%"
except Exception as e:
return f"Weather error: {str(e)}"
def calculate(expression: str) -> str:
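    """Safely evaluate an arithmetic expression (+, -, *, /, **, %, unary minus) using the ast module."""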
operators = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul, ast.Div: op.truediv, ast.Pow: op.pow, ast.USub: op.neg, ast.Mod: op.mod}
def eval_expr(node):
        if isinstance(node, ast.Constant):  # ast.Num / node.n are deprecated since Python 3.8
            if isinstance(node.value, (int, float)):
                return node.value
            raise TypeError(node)
elif isinstance(node, ast.BinOp):
return operators[type(node.op)](eval_expr(node.left), eval_expr(node.right))
elif isinstance(node, ast.UnaryOp):
return operators[type(node.op)](eval_expr(node.operand))
raise TypeError(node)
try:
result = eval_expr(ast.parse(expression.strip(), mode='eval').body)
return f"Result: {result}"
except Exception as e:
return f"Calculation error: {str(e)}"
def python_repl(code: str) -> str:
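    """Execute Python code against a whitelist of builtins, capturing anything printed to stdout."""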
    from io import StringIO
    import sys
    # Only a small whitelist of builtins is exposed to the executed code.
    safe_builtins = {'abs': abs, 'round': round, 'min': min, 'max': max, 'sum': sum, 'len': len, 'range': range, 'list': list, 'dict': dict, 'str': str, 'int': int, 'float': float, 'print': print}
    namespace = {'__builtins__': safe_builtins}
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        exec(code, namespace)
        output = sys.stdout.getvalue()
    except Exception as e:
        return f"Python error: {str(e)}"
    finally:
        sys.stdout = old_stdout  # always restore stdout, even if exec() raises
    result_vars = {k: v for k, v in namespace.items() if k != '__builtins__' and not k.startswith('_')}
    return f"Python output: {output if output else (str(result_vars) if result_vars else 'Code executed')}"
TOOLS = [
Tool("duckduckgo_search", "Search the web. Input: search query.", duckduckgo_search),
Tool("wikipedia_search", "Search Wikipedia. Input: search query.", wikipedia_search),
Tool("get_weather", "Get weather for location. Input: city name.", get_weather),
Tool("calculate", "Calculate math expression. Input: expression.", calculate),
Tool("python_repl", "Execute Python code. Input: code.", python_repl),
]
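# Each Tool is callable by name via call_tool() below; illustrative example:
#   call_tool("calculate", "2 + 3")  ->  "Result: 5"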
MODEL_NAME = "openai/gpt-oss-20b"
model = None
tokenizer = None
model_loaded = False
def download_and_load_model(progress=gr.Progress()):
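    """Download the model and tokenizer from the Hugging Face Hub, reporting progress in the UI."""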
global model, tokenizer, model_loaded
try:
progress(0, desc="Downloading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
progress(0.4, desc="Downloading model (this may take several minutes)...")
model = AutoModelForCausalLM.from_pretrained(
MODEL_NAME,
trust_remote_code=True,
torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
device_map="auto",
low_cpu_mem_usage=True,
)
progress(0.95, desc="Finalizing...")
model_loaded = True
progress(1.0, desc="Model loaded!")
return f"Model '{MODEL_NAME}' loaded successfully!"
except Exception as e:
return f"Error: {str(e)}"
def get_tool_descriptions() -> str:
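    """Render the tool registry as a bulleted list for insertion into the prompts."""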
return "\n".join([f"- {tool.name}: {tool.description}" for tool in TOOLS])
THINK_ONLY_PROMPT = """You are an expert problem solver. Use your knowledge and reasoning to answer questions.
You must show your complete reasoning process using this format:
Thought: [Explain what you're thinking and why]
Thought: [Continue your reasoning, breaking down the problem]
Thought: [Build toward the solution step by step]
Answer: [Your final, complete answer]
Important:
- Show multiple thought steps
- Break down complex problems
- Explain your reasoning clearly
- Only provide the Answer when you're certain
Question: {question}
Let me think through this step by step:
Thought:"""
ACT_ONLY_PROMPT = """You are an AI agent with access to external tools. You MUST use tools to find information.
Available tools:
{tools}
You MUST respond ONLY with actions - no thinking out loud:
Action: [exact tool name]
Action Input: [specific input for the tool]
After receiving the Observation, you can:
- Call another tool if you need more information
- Provide the final Answer when you have enough information
Format:
Action: tool_name
Action Input: input_string
Then after observation:
Action: another_tool
Action Input: another_input
OR
Answer: [final answer based on observations]
Question: {question}
Action:"""
REACT_PROMPT = """You are an expert AI agent that combines reasoning with tool usage (ReAct paradigm).
Available tools:
{tools}
You MUST alternate between thinking and acting:
1. Thought: [Reason about what information you need and which tool to use]
2. Action: [exact tool name]
3. Action Input: [specific input]
4. Observation: [tool result - will be provided to you]
5. Thought: [Analyze the observation and decide next steps]
6. Repeat 2-5 until you have enough information
7. Thought: [Final reasoning with all gathered information]
8. Answer: [Complete final answer]
Rules:
- ALWAYS start with a Thought explaining your strategy
- After each Observation, think about what you learned
- Use multiple tools if needed
- Only give Answer when you have sufficient information
- Be specific in your Action Inputs
Question: {question}
Thought:"""
def parse_action(text: str) -> tuple:
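    """Extract (tool_name, tool_input) from a model response, or (None, None) if absent.

    e.g. parse_action("Action: calculate\\nAction Input: 2 + 3") -> ("calculate", "2 + 3")
    """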
action_match = re.search(r'Action:\s*(\w+)', text, re.IGNORECASE)
    input_match = re.search(r'Action Input:\s*(.+?)(?=\n(?:Thought:|Action:|Answer:)|$)', text, re.IGNORECASE | re.DOTALL)
return (action_match.group(1).strip(), input_match.group(1).strip()) if action_match and input_match else (None, None)
def call_tool(tool_name: str, tool_input: str) -> str:
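    """Dispatch tool_input to the registered tool matching tool_name (case-insensitive)."""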
for tool in TOOLS:
if tool.name.lower() == tool_name.lower():
return tool(tool_input)
return f"Error: Tool '{tool_name}' not found."
def call_llm(prompt: str, max_tokens: int = 500) -> str:
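    """Generate a completion (sampling at temperature 0.7, top-p 0.9) and return only the new text."""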
if not model_loaded:
return "Error: Model not loaded."
try:
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048)
        # Move inputs to wherever device_map="auto" placed the model (a no-op on CPU).
        inputs = {k: v.to(model.device) for k, v in inputs.items()}
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=max_tokens,
temperature=0.7,
do_sample=True,
top_p=0.9,
pad_token_id=tokenizer.eos_token_id,
eos_token_id=tokenizer.eos_token_id,
)
response = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
return response.strip()
except Exception as e:
return f"Error during generation: {str(e)}"
def think_only_mode(question: str) -> Generator[str, None, None]:
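    """Stream a pure chain-of-thought run: the model reasons in Thought: steps with no tool access."""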
if not model_loaded:
yield "❌ **Error: Model not loaded. Click 'Download & Load Model' first.**\n\n"
return
yield "🧠 **Mode: Think-Only (Chain-of-Thought)**\n\n"
yield "πŸ’­ Generating reasoning steps...\n\n"
response = call_llm(THINK_ONLY_PROMPT.format(question=question), max_tokens=800)
if response.startswith("Error"):
yield f"❌ {response}\n\n"
return
for line in response.split('\n'):
if line.strip():
if line.strip().startswith('Thought:'):
yield f"πŸ’­ **{line.strip()}**\n\n"
elif line.strip().startswith('Answer:'):
yield f"βœ… **{line.strip()}**\n\n"
else:
yield f"{line}\n\n"
yield "\n---\nβœ“ **Completed**\n"
def act_only_mode(question: str, max_iterations: int = 5) -> Generator[str, None, None]:
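    """Stream an act-only run: the model emits Action/Action Input pairs and sees Observations, with no explicit reasoning."""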
if not model_loaded:
yield "❌ **Error: Model not loaded. Click 'Download & Load Model' first.**\n\n"
return
yield "πŸ”§ **Mode: Act-Only (Tool Use Only)**\n\n"
conversation = ACT_ONLY_PROMPT.format(question=question, tools=get_tool_descriptions())
for iteration in range(max_iterations):
yield f"πŸ”„ **Iteration {iteration + 1}**\n\n"
response = call_llm(conversation, max_tokens=300)
if response.startswith("Error"):
yield f"❌ {response}\n\n"
return
if 'Answer:' in response:
match = re.search(r'Answer:\s*(.+)', response, re.IGNORECASE | re.DOTALL)
if match:
yield f"βœ… **Answer:** {match.group(1).strip()}\n\n"
break
action_name, action_input = parse_action(response)
if action_name and action_input:
yield f"πŸ”§ **Action:** `{action_name}`\n"
yield f"πŸ“ **Action Input:** {action_input}\n\n"
yield f"⏳ Executing tool...\n\n"
observation = call_tool(action_name, action_input)
yield f"πŸ‘οΈ **Observation:** {observation}\n\n"
conversation += f"\n{response}\nObservation: {observation}\n\n"
else:
yield f"⚠️ No valid action found. Response: {response}\n\n"
break
yield "\n---\nβœ“ **Completed**\n"
def react_mode(question: str, max_iterations: int = 5) -> Generator[str, None, None]:
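    """Stream a ReAct run: alternate Thought / Action / Observation until the model emits an Answer or max_iterations is hit."""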
if not model_loaded:
yield "❌ **Error: Model not loaded. Click 'Download & Load Model' first.**\n\n"
return
yield "πŸ€– **Mode: ReAct (Reasoning + Acting)**\n\n"
conversation = REACT_PROMPT.format(question=question, tools=get_tool_descriptions())
for iteration in range(max_iterations):
yield f"πŸ”„ **Step {iteration + 1}**\n\n"
response = call_llm(conversation, max_tokens=400)
if response.startswith("Error"):
yield f"❌ {response}\n\n"
return
# Extract and display thoughts
for thought in re.findall(r'Thought:\s*(.+?)(?=\n(?:Action:|Answer:|$))', response, re.IGNORECASE | re.DOTALL):
yield f"πŸ’­ **Thought:** {thought.strip()}\n\n"
# Check for final answer
if 'Answer:' in response:
match = re.search(r'Answer:\s*(.+)', response, re.IGNORECASE | re.DOTALL)
if match:
yield f"βœ… **Answer:** {match.group(1).strip()}\n\n"
break
# Parse and execute action
action_name, action_input = parse_action(response)
if action_name and action_input:
yield f"πŸ”§ **Action:** `{action_name}`\n"
yield f"πŸ“ **Action Input:** {action_input}\n\n"
yield f"⏳ Executing tool...\n\n"
observation = call_tool(action_name, action_input)
yield f"πŸ‘οΈ **Observation:** {observation}\n\n"
conversation += f"\n{response}\nObservation: {observation}\n\nThought:"
else:
if 'Answer:' not in response:
yield f"⚠️ No action found. Response: {response}\n\n"
break
yield "\n---\nβœ“ **Completed**\n"
EXAMPLES = [
"What is 25 * 47?",
"What is the weather in Paris?",
"Who wrote 1984?",
"Calculate: 100 + 200",
]
def run_comparison(question: str, mode: str):
"""Run selected mode with real-time streaming."""
if not question.strip():
yield "Please enter a question.", "", ""
return
if mode == "Think-Only":
think_out = ""
for chunk in think_only_mode(question):
think_out += chunk
yield think_out, "", ""
elif mode == "Act-Only":
act_out = ""
for chunk in act_only_mode(question):
act_out += chunk
yield "", act_out, ""
elif mode == "ReAct":
react_out = ""
for chunk in react_mode(question):
react_out += chunk
yield "", "", react_out
else:
yield "Invalid mode selected.", "", ""
with gr.Blocks(title="LLM Reasoning Modes") as demo:
gr.Markdown("# LLM Reasoning Modes Comparison\n\n**Model:** openai/gpt-oss-20b\n\n**Tools:** DuckDuckGo | Wikipedia | Weather | Calculator | Python")
with gr.Row():
download_btn = gr.Button("Download & Load Model", variant="primary", size="lg")
model_status = gr.Textbox(label="Status", value="Click to download", interactive=False)
with gr.Row():
with gr.Column(scale=3):
question_input = gr.Textbox(label="Question", lines=3)
mode_dropdown = gr.Dropdown(choices=["Think-Only", "Act-Only", "ReAct"], value="ReAct", label="Mode")
submit_btn = gr.Button("Run", variant="primary", size="lg")
with gr.Column(scale=1):
gr.Markdown("**Examples**")
for idx, ex in enumerate(EXAMPLES):
gr.Button(f"Ex {idx+1}", size="sm").click(fn=lambda e=ex: e, outputs=question_input)
gr.Markdown("---")
with gr.Row():
think_output = gr.Markdown(label="Think-Only")
act_output = gr.Markdown(label="Act-Only")
react_output = gr.Markdown(label="ReAct")
download_btn.click(fn=download_and_load_model, outputs=model_status)
submit_btn.click(fn=run_comparison, inputs=[question_input, mode_dropdown], outputs=[think_output, act_output, react_output])
if __name__ == "__main__":
demo.launch(share=True)