Update app.py
Browse files
app.py
CHANGED
|
@@ -27,57 +27,57 @@ def preprocess_image(image):
|
|
| 27 |
# Convert to PIL Image if needed
|
| 28 |
if isinstance(image, np.ndarray):
|
| 29 |
image = Image.fromarray(image)
|
| 30 |
-
|
| 31 |
# Convert to RGB if necessary
|
| 32 |
if image.mode != 'RGB':
|
| 33 |
image = image.convert('RGB')
|
| 34 |
-
|
| 35 |
# Resize to model's expected input size
|
| 36 |
image = image.resize((224, 224))
|
| 37 |
-
|
| 38 |
# Convert to numpy array
|
| 39 |
img_array = np.array(image).astype('float32')
|
| 40 |
-
|
| 41 |
# Add batch dimension
|
| 42 |
img_array = np.expand_dims(img_array, axis=0)
|
| 43 |
-
|
| 44 |
# Note: preprocessing is handled by the Lambda layer in the model
|
| 45 |
# So we don't need to apply preprocess_input here
|
| 46 |
-
|
| 47 |
return img_array
|
| 48 |
|
| 49 |
def predict(image):
    """
    Predict whether an image is AI-generated or real.

    Args:
        image: Input image (PIL Image or numpy array)

    Returns:
        Dictionary mapping class labels to probabilities;
        {"Error": 1.0} when the model is unavailable, the input is
        missing, or prediction fails.
    """
    if model is None:
        return {"Error": 1.0}

    if image is None:
        return {"Error": 1.0}

    try:
        # Preprocess the image (resized to 224x224, float32, batch dim added)
        processed_image = preprocess_image(image)

        # Make prediction. No training=False argument - model.predict() does
        # not accept it in Keras 3. [0][0] extracts the scalar sigmoid output
        # from the (1, 1) batched result.
        prediction = model.predict(processed_image, verbose=0)[0][0]

        # Convert the single sigmoid score into per-class probabilities
        ai_prob = float(prediction)
        real_prob = 1.0 - ai_prob

        return {
            "Real Image": real_prob,
            "AI-Generated": ai_prob
        }
    except Exception as e:
        print(f"Prediction error: {e}")
        return {"Error": 1.0}
|
|
@@ -85,35 +85,35 @@ def predict(image):
|
|
| 85 |
def classify_image(image):
|
| 86 |
"""
|
| 87 |
Main classification function with detailed output.
|
| 88 |
-
|
| 89 |
Args:
|
| 90 |
image: Input image
|
| 91 |
-
|
| 92 |
Returns:
|
| 93 |
Tuple of (label_output, confidence_text)
|
| 94 |
"""
|
| 95 |
if model is None:
|
| 96 |
return None, "β **Model failed to load. Please check the model file.**"
|
| 97 |
-
|
| 98 |
if image is None:
|
| 99 |
return None, "Please upload an image to analyze."
|
| 100 |
-
|
| 101 |
results = predict(image)
|
| 102 |
-
|
| 103 |
if "Error" in results:
|
| 104 |
return None, "Error processing image. Please try again."
|
| 105 |
-
|
| 106 |
# Determine the classification
|
| 107 |
ai_prob = results["AI-Generated"]
|
| 108 |
real_prob = results["Real Image"]
|
| 109 |
-
|
| 110 |
if ai_prob > 0.5:
|
| 111 |
classification = "AI-Generated"
|
| 112 |
confidence = ai_prob
|
| 113 |
else:
|
| 114 |
classification = "Real Image"
|
| 115 |
confidence = real_prob
|
| 116 |
-
|
| 117 |
# Create detailed analysis text
|
| 118 |
analysis = f"""
|
| 119 |
### Analysis Results
|
|
@@ -124,31 +124,27 @@ def classify_image(image):
|
|
| 124 |
- Real Image: {real_prob:.2%}
|
| 125 |
- AI-Generated: {ai_prob:.2%}
|
| 126 |
---
|
| 127 |
-
*Note: This model was trained on CIFAKE and Tiny GenImage datasets.
|
| 128 |
Results may vary for images outside these domains.*
|
| 129 |
"""
|
| 130 |
-
|
| 131 |
return results, analysis
|
| 132 |
|
| 133 |
# Create the Gradio interface
|
| 134 |
with gr.Blocks(
|
| 135 |
title="AI Image Detector"
|
| 136 |
-
# theme=gr.themes.Soft(
|
| 137 |
-
# primary_hue="blue",
|
| 138 |
-
# secondary_hue="gray",
|
| 139 |
-
# )
|
| 140 |
) as demo:
|
| 141 |
gr.Markdown(
|
| 142 |
"""
|
| 143 |
# π AI Image Detector
|
| 144 |
-
|
| 145 |
Upload an image to detect whether it's **AI-generated** or a **real photograph**.
|
| 146 |
-
|
| 147 |
-
This model uses transfer learning with MobileNetV2 and was trained on the
|
| 148 |
CIFAKE and Tiny GenImage datasets, achieving ~95% accuracy on validation data.
|
| 149 |
"""
|
| 150 |
)
|
| 151 |
-
|
| 152 |
with gr.Row():
|
| 153 |
with gr.Column(scale=1):
|
| 154 |
input_image = gr.Image(
|
|
@@ -156,13 +152,13 @@ with gr.Blocks(
|
|
| 156 |
type="pil",
|
| 157 |
height=400
|
| 158 |
)
|
| 159 |
-
|
| 160 |
submit_btn = gr.Button(
|
| 161 |
"π Analyze Image",
|
| 162 |
variant="primary",
|
| 163 |
size="lg"
|
| 164 |
)
|
| 165 |
-
|
| 166 |
gr.Markdown(
|
| 167 |
"""
|
| 168 |
### Tips for best results:
|
|
@@ -171,46 +167,46 @@ with gr.Blocks(
|
|
| 171 |
- Works with JPG, PNG, and other common formats
|
| 172 |
"""
|
| 173 |
)
|
| 174 |
-
|
| 175 |
with gr.Column(scale=1):
|
| 176 |
output_label = gr.Label(
|
| 177 |
label="Classification Results",
|
| 178 |
num_top_classes=2
|
| 179 |
)
|
| 180 |
-
|
| 181 |
output_text = gr.Markdown(
|
| 182 |
label="Detailed Analysis"
|
| 183 |
)
|
| 184 |
-
|
| 185 |
# Connect the interface
|
| 186 |
submit_btn.click(
|
| 187 |
fn=classify_image,
|
| 188 |
inputs=input_image,
|
| 189 |
outputs=[output_label, output_text]
|
| 190 |
)
|
| 191 |
-
|
| 192 |
input_image.change(
|
| 193 |
fn=classify_image,
|
| 194 |
inputs=input_image,
|
| 195 |
outputs=[output_label, output_text]
|
| 196 |
)
|
| 197 |
-
|
| 198 |
gr.Markdown(
|
| 199 |
"""
|
| 200 |
---
|
| 201 |
### About this model
|
| 202 |
-
|
| 203 |
- **Architecture:** MobileNetV2 with custom classification head
|
| 204 |
- **Training Data:** CIFAKE dataset + Tiny GenImage dataset (~128,000 images)
|
| 205 |
- **Input Size:** 224Γ224 pixels
|
| 206 |
- **Classes:** Real (0) vs AI-Generated (1)
|
| 207 |
-
|
| 208 |
-
**Disclaimer:** This tool is for educational and research purposes.
|
| 209 |
AI detection is an evolving field, and no detector is 100% accurate.
|
| 210 |
-
|
| 211 |
"""
|
| 212 |
)
|
| 213 |
|
| 214 |
# Launch the app
|
| 215 |
if __name__ == "__main__":
|
| 216 |
-
demo.launch()
|
|
|
|
| 27 |
# Convert to PIL Image if needed
|
| 28 |
if isinstance(image, np.ndarray):
|
| 29 |
image = Image.fromarray(image)
|
| 30 |
+
|
| 31 |
# Convert to RGB if necessary
|
| 32 |
if image.mode != 'RGB':
|
| 33 |
image = image.convert('RGB')
|
| 34 |
+
|
| 35 |
# Resize to model's expected input size
|
| 36 |
image = image.resize((224, 224))
|
| 37 |
+
|
| 38 |
# Convert to numpy array
|
| 39 |
img_array = np.array(image).astype('float32')
|
| 40 |
+
|
| 41 |
# Add batch dimension
|
| 42 |
img_array = np.expand_dims(img_array, axis=0)
|
| 43 |
+
|
| 44 |
# Note: preprocessing is handled by the Lambda layer in the model
|
| 45 |
# So we don't need to apply preprocess_input here
|
| 46 |
+
|
| 47 |
return img_array
|
| 48 |
|
| 49 |
def predict(image):
    """Classify an image as AI-generated or real.

    Args:
        image: Input image (PIL Image or numpy array), or None.

    Returns:
        Mapping of class label to probability, or {"Error": 1.0} when the
        model is unavailable, the input is missing, or inference fails.
    """
    # Guard clauses: no loaded model or no input means no prediction.
    if model is None:
        return {"Error": 1.0}
    if image is None:
        return {"Error": 1.0}

    try:
        batch = preprocess_image(image)
        # Single sigmoid output: probability the image is AI-generated.
        # (training=False is deliberately not passed - model.predict()
        # does not support it in Keras 3.)
        raw_score = model.predict(batch, verbose=0)[0][0]
        ai_prob = float(raw_score)
        return {
            "Real Image": 1.0 - ai_prob,
            "AI-Generated": ai_prob
        }
    except Exception as e:
        print(f"Prediction error: {e}")
        return {"Error": 1.0}
|
|
|
|
| 85 |
def classify_image(image):
|
| 86 |
"""
|
| 87 |
Main classification function with detailed output.
|
| 88 |
+
|
| 89 |
Args:
|
| 90 |
image: Input image
|
| 91 |
+
|
| 92 |
Returns:
|
| 93 |
Tuple of (label_output, confidence_text)
|
| 94 |
"""
|
| 95 |
if model is None:
|
| 96 |
return None, "β **Model failed to load. Please check the model file.**"
|
| 97 |
+
|
| 98 |
if image is None:
|
| 99 |
return None, "Please upload an image to analyze."
|
| 100 |
+
|
| 101 |
results = predict(image)
|
| 102 |
+
|
| 103 |
if "Error" in results:
|
| 104 |
return None, "Error processing image. Please try again."
|
| 105 |
+
|
| 106 |
# Determine the classification
|
| 107 |
ai_prob = results["AI-Generated"]
|
| 108 |
real_prob = results["Real Image"]
|
| 109 |
+
|
| 110 |
if ai_prob > 0.5:
|
| 111 |
classification = "AI-Generated"
|
| 112 |
confidence = ai_prob
|
| 113 |
else:
|
| 114 |
classification = "Real Image"
|
| 115 |
confidence = real_prob
|
| 116 |
+
|
| 117 |
# Create detailed analysis text
|
| 118 |
analysis = f"""
|
| 119 |
### Analysis Results
|
|
|
|
| 124 |
- Real Image: {real_prob:.2%}
|
| 125 |
- AI-Generated: {ai_prob:.2%}
|
| 126 |
---
|
| 127 |
+
*Note: This model was trained on CIFAKE and Tiny GenImage datasets.
|
| 128 |
Results may vary for images outside these domains.*
|
| 129 |
"""
|
| 130 |
+
|
| 131 |
return results, analysis
|
| 132 |
|
| 133 |
# Create the Gradio interface
|
| 134 |
with gr.Blocks(
|
| 135 |
title="AI Image Detector"
|
|
|
|
|
|
|
|
|
|
|
|
|
| 136 |
) as demo:
|
| 137 |
gr.Markdown(
|
| 138 |
"""
|
| 139 |
# π AI Image Detector
|
| 140 |
+
|
| 141 |
Upload an image to detect whether it's **AI-generated** or a **real photograph**.
|
| 142 |
+
|
| 143 |
+
This model uses transfer learning with MobileNetV2 and was trained on the
|
| 144 |
CIFAKE and Tiny GenImage datasets, achieving ~95% accuracy on validation data.
|
| 145 |
"""
|
| 146 |
)
|
| 147 |
+
|
| 148 |
with gr.Row():
|
| 149 |
with gr.Column(scale=1):
|
| 150 |
input_image = gr.Image(
|
|
|
|
| 152 |
type="pil",
|
| 153 |
height=400
|
| 154 |
)
|
| 155 |
+
|
| 156 |
submit_btn = gr.Button(
|
| 157 |
"π Analyze Image",
|
| 158 |
variant="primary",
|
| 159 |
size="lg"
|
| 160 |
)
|
| 161 |
+
|
| 162 |
gr.Markdown(
|
| 163 |
"""
|
| 164 |
### Tips for best results:
|
|
|
|
| 167 |
- Works with JPG, PNG, and other common formats
|
| 168 |
"""
|
| 169 |
)
|
| 170 |
+
|
| 171 |
with gr.Column(scale=1):
|
| 172 |
output_label = gr.Label(
|
| 173 |
label="Classification Results",
|
| 174 |
num_top_classes=2
|
| 175 |
)
|
| 176 |
+
|
| 177 |
output_text = gr.Markdown(
|
| 178 |
label="Detailed Analysis"
|
| 179 |
)
|
| 180 |
+
|
| 181 |
# Connect the interface
|
| 182 |
submit_btn.click(
|
| 183 |
fn=classify_image,
|
| 184 |
inputs=input_image,
|
| 185 |
outputs=[output_label, output_text]
|
| 186 |
)
|
| 187 |
+
|
| 188 |
input_image.change(
|
| 189 |
fn=classify_image,
|
| 190 |
inputs=input_image,
|
| 191 |
outputs=[output_label, output_text]
|
| 192 |
)
|
| 193 |
+
|
| 194 |
gr.Markdown(
|
| 195 |
"""
|
| 196 |
---
|
| 197 |
### About this model
|
| 198 |
+
|
| 199 |
- **Architecture:** MobileNetV2 with custom classification head
|
| 200 |
- **Training Data:** CIFAKE dataset + Tiny GenImage dataset (~128,000 images)
|
| 201 |
- **Input Size:** 224Γ224 pixels
|
| 202 |
- **Classes:** Real (0) vs AI-Generated (1)
|
| 203 |
+
|
| 204 |
+
**Disclaimer:** This tool is for educational and research purposes.
|
| 205 |
AI detection is an evolving field, and no detector is 100% accurate.
|
| 206 |
+
|
| 207 |
"""
|
| 208 |
)
|
| 209 |
|
| 210 |
# Launch the app
|
| 211 |
if __name__ == "__main__":
|
| 212 |
+
demo.launch()
|