SahilSingh0 committed
Commit bc9aab1 · verified · 1 Parent(s): f1fd10a

update with fixes

Files changed (2)
  1. app.py +71 -34
  2. requirements.txt +5 -3
app.py CHANGED
@@ -1,40 +1,77 @@
  import gradio as gr
  from transformers import pipeline

- # Load zero-shot classification pipeline
- classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
-
- # Labels to classify as AI-written or Human-written
- labels = ["AI-generated text", "Human-written text"]
-
- def detect_ai_content(text):
-     result = classifier(text, labels)
-     scores = dict(zip(result["labels"], result["scores"]))
-     ai_score = scores["AI-generated text"]
-     human_score = scores["Human-written text"]
-
-     if ai_score > human_score:
-         verdict = "⚠️ This text looks AI-Generated"
-     else:
-         verdict = "✅ This text looks Human-Written"
-
-     return {
-         "AI Probability": f"{ai_score:.2%}",
-         "Human Probability": f"{human_score:.2%}",
-         "Verdict": verdict
-     }
-
- # Gradio Interface
- demo = gr.Interface(
-     fn=detect_ai_content,
-     inputs=gr.Textbox(lines=10, placeholder="Paste text here..."),
-     outputs=[
-         gr.Label(num_top_classes=2, label="Probabilities"),
-         gr.Textbox(label="Verdict")
-     ],
-     title="AI Content Detector",
-     description="Detect whether the given text is AI-generated or Human-written."
- )
+ # 🔹 Load model once and cache it
+ # This runs only at startup, not on every request
+ print("🔄 Loading AI detection model...")
+ pipe = pipeline("text-classification", model="roberta-base-openai-detector")
+ print("✅ Model loaded successfully!")

+ def detect_text(text: str):
+     """Detect whether the text is AI-generated or Human-written."""
+     if not text.strip():
+         return {}, "❌ Please enter some text."
+
+     try:
+         results = pipe(text)
+
+         # Convert results into a probability dictionary
+         probs = {r["label"]: float(r["score"]) for r in results}
+
+         # Get label with highest probability
+         verdict = str(max(probs, key=probs.get))
+
+         # Make verdict more user-friendly
+         if verdict.upper() in ["LABEL_0", "FAKE", "AI"]:
+             verdict_message = "🤖 This looks AI-generated"
+         elif verdict.upper() in ["LABEL_1", "REAL", "HUMAN"]:
+             verdict_message = "📝 This looks Human-written"
+         else:
+             verdict_message = f"⚠️ Unknown verdict: {verdict}"
+
+         return probs, verdict_message
+
+     except Exception as e:
+         return {}, f"❌ Error: {str(e)}"
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown(
+         """
+         # 🕵️ AI Content Detector
+         Paste some text below and check if it's **AI-generated or Human-written**.
+         """
+     )
+
+     with gr.Row():
+         text_input = gr.Textbox(
+             label="✍️ Input Text",
+             lines=10,
+             placeholder="Paste text here..."
+         )
+
+     with gr.Row():
+         output_probs = gr.Label(label="📊 Probabilities")
+         output_verdict = gr.Textbox(label="✅ Verdict", interactive=False)
+
+     with gr.Row():
+         submit_btn = gr.Button("🔎 Submit", variant="primary")
+         clear_btn = gr.Button("🧹 Clear")
+
+     # Submit action
+     submit_btn.click(
+         fn=detect_text,
+         inputs=text_input,
+         outputs=[output_probs, output_verdict],
+     )
+
+     # Clear action
+     clear_btn.click(
+         fn=lambda: ("", {}, ""),
+         inputs=[],
+         outputs=[text_input, output_probs, output_verdict],
+     )
+
+ # 🔹 Launch app
  if __name__ == "__main__":
      demo.launch()
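
A quick way to see what the new detect_text() helper consumes is to call the pipeline directly. The sketch below is not part of the commit: the sample sentence is invented, the Real/Fake label names are assumed from the verdict mapping above, and the scores shown in comments are illustrative only. It also tries top_k=None, which current transformers releases (including the pin below) accept at call time so that scores for every label come back instead of only the top one; that fuller output is what gr.Label can render as a probability breakdown.

# Standalone sketch (not part of the commit): inspect the raw output that
# detect_text() receives from the text-classification pipeline.
from transformers import pipeline

pipe = pipeline("text-classification", model="roberta-base-openai-detector")

sample = "The quick brown fox jumps over the lazy dog."  # invented example text

# Default call: a list containing only the single best label.
print(pipe(sample))              # e.g. [{'label': 'Real', 'score': 0.98}] (illustrative)

# top_k=None: scores for every label, suitable for a full probability breakdown.
print(pipe(sample, top_k=None))  # e.g. [{'label': 'Real', ...}, {'label': 'Fake', ...}]
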
requirements.txt CHANGED
@@ -1,3 +1,5 @@
- gradio
- transformers
- torch
+ gradio==4.36.1
+ transformers==4.43.3
+ torch>=2.0.0
+ accelerate>=0.33.0
+ safetensors>=0.4.2
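
The tightened pins above can be sanity-checked at runtime before the Space boots. A minimal sketch, not part of the commit, assuming only that the three core packages expose the usual __version__ attribute; the expected values in the comments mirror requirements.txt.

# Minimal sketch (not part of the commit): confirm the pinned stack imports
# cleanly and reports the versions requested in requirements.txt.
import gradio
import torch
import transformers

print("gradio:      ", gradio.__version__)        # expected 4.36.1
print("transformers:", transformers.__version__)  # expected 4.43.3
print("torch:       ", torch.__version__)         # expected >= 2.0.0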