rigelbar committed (verified)
Commit cdde7db · 1 Parent(s): 775a0ec
Files changed (7)
  1. README.md +43 -3
  2. app.py +16 -0
  3. handler.py +20 -0
  4. inference.py +37 -0
  5. model.py +17 -0
  6. model.safetensors +3 -0
  7. requirements.txt +5 -0
README.md CHANGED
@@ -1,3 +1,43 @@
- ---
- license: mit
- ---
+ ---
+ pipeline_tag: image-classification
+ license: mit
+ library_name: pytorch
+ tags: [pytorch, minimal, demo]
+ model-index:
+ - name: tiny-digits-cnn
+   results:
+   - task: {type: image-classification}
+     metrics:
+     - type: accuracy
+       value: 0.00 # demo-only (untrained)
+ ---
+
+ # Tiny Digits CNN (demo-only)
+
+ Toy 28×28 grayscale classifier (10 classes, 0–9) for **Hugging Face deployment tests**.
+ No training—weights are randomly initialized just to validate repo layout, Spaces, and Inference Endpoints.
+
+ ## Files
+ - `model.py` — tiny CNN
+ - `model.safetensors` — weights (create with `python generate_weights.py`)
+ - `inference.py` — load → preprocess → predict
+ - `handler.py` — Endpoint handler (`EndpointHandler`)
+ - `app.py` — Gradio Space UI
+ - `requirements.txt`, `.gitattributes`, `LICENSE`
+
+ ## Quickstart (local)
+ ```bash
+ pip install -r requirements.txt
+ python generate_weights.py
+ python app.py
+ ```
+
+ ## Call via the Hosted Inference API (if enabled) or an Inference Endpoint
+ ```bash
+ # Replace with your endpoint URL or model API URL
+ API=https://api-inference.huggingface.co/models/ORG/REPO
+ curl -X POST "$API" \
+   -H "Authorization: Bearer $HF_TOKEN" \
+   -H "Content-Type: application/json" \
+   -d '{"inputs": {"image_base64": "<PUT_BASE64_IMAGE_HERE>"}}'
+ ```
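Note: the README's quickstart runs `python generate_weights.py`, but that script is not part of this commit. A minimal sketch of what it might contain, assuming it only needs to serialize TinyConv's random initialization to `model.safetensors`:

```python
# generate_weights.py (hypothetical sketch): referenced by the README but not
# included in this commit. Assumes model.py defines TinyConv as in this repo.
from safetensors.torch import save_file
from model import TinyConv

model = TinyConv().eval()
# The demo is intentionally untrained: just persist the random initialization
# so inference.py and the endpoint handler have a weights file to load.
save_file(model.state_dict(), "model.safetensors")
print("wrote model.safetensors")
```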
app.py ADDED
@@ -0,0 +1,16 @@
+ import gradio as gr
+ from inference import predict_from_pil
+
+ def infer(img):
+     return predict_from_pil(img)
+
+ demo = gr.Interface(
+     fn=infer,
+     inputs=gr.Image(type="pil", label="28x28 grayscale image (any image will be resized)"),
+     outputs=gr.Label(num_top_classes=3, label="Top-3 predictions"),
+     title="Tiny Digits (Toy Classifier)",
+     description="Ultra-small CNN demo for deployment tests. Not trained; for pipeline sanity only."
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
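If this app is published as a public Space, it can also be called programmatically; a hedged sketch using `gradio_client` (the Space id and image filename are placeholders, and the exact call shape depends on the installed gradio_client version):

```python
# Hypothetical client call to a deployed Space running app.py; "ORG/SPACE" is a placeholder.
from gradio_client import Client, handle_file

client = Client("ORG/SPACE")
# gr.Interface exposes its prediction function under api_name="/predict" by default.
result = client.predict(handle_file("digit.png"), api_name="/predict")
print(result)  # label/confidence output from gr.Label
```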
handler.py ADDED
@@ -0,0 +1,20 @@
+ # Hugging Face Inference Endpoint handler
+ # Expects {"inputs": {"image_base64": "<...>"}} or {"inputs": "<base64>"}
+ from inference import load_model, predict_from_base64
+
+ class EndpointHandler:
+     def __init__(self, path=""):
+         # path points to the repo dir inside the container
+         self.model = load_model(path)
+
+     def __call__(self, data):
+         inputs = data.get("inputs", {})
+         if isinstance(inputs, str):
+             b64 = inputs
+         else:
+             b64 = inputs.get("image_base64")
+         if not b64:
+             return {"error": "Provide base64 image under 'inputs' or 'inputs.image_base64'."}
+         preds = predict_from_base64(b64)
+         # Return {label: prob}
+         return {"predictions": preds}
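For reference, a client-side sketch of posting the payload shape this handler expects; it uses the `requests` library, and the endpoint URL, token, and image filename are placeholders, not values from this repo:

```python
# Hypothetical client for the EndpointHandler above.
import base64
import requests

with open("digit.png", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

resp = requests.post(
    "https://YOUR-ENDPOINT.endpoints.huggingface.cloud",  # replace with your endpoint URL
    headers={"Authorization": "Bearer hf_xxx", "Content-Type": "application/json"},
    json={"inputs": {"image_base64": image_b64}},
)
print(resp.json())  # expected shape: {"predictions": {"0": 0.1, ..., "9": 0.1}}
```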
inference.py ADDED
@@ -0,0 +1,37 @@
+ import io, base64, os
+ import numpy as np
+ from PIL import Image
+ import torch
+ from safetensors.torch import load_file
+ from model import TinyConv
+
+ LABELS = [str(i) for i in range(10)]
+
+ def load_model(path: str = ""):
+     m = TinyConv().eval()
+     try:
+         state = load_file(os.path.join(path, "model.safetensors"))
+         m.load_state_dict(state, strict=True)
+     except Exception:
+         # Fallback to randomly initialized weights if file missing
+         pass
+     return m
+
+ MODEL = load_model()
+
+ def preprocess_pil(img: Image.Image) -> torch.Tensor:
+     img = img.convert("L").resize((28, 28))
+     x = torch.from_numpy(np.array(img)).float() / 255.0
+     x = x.unsqueeze(0).unsqueeze(0)  # [1,1,28,28]
+     return x
+
+ def predict_from_pil(img: Image.Image):
+     x = preprocess_pil(img)
+     with torch.inference_mode():
+         logits = MODEL(x)
+         probs = torch.softmax(logits, dim=-1)[0].tolist()
+     return {label: float(probs[i]) for i, label in enumerate(LABELS)}
+
+ def predict_from_base64(b64: str):
+     img = Image.open(io.BytesIO(base64.b64decode(b64)))
+     return predict_from_pil(img)
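A quick local smoke test of this module (illustrative only; any PIL image works because `preprocess_pil` converts to grayscale and resizes to 28×28):

```python
# Illustrative smoke test for inference.py, run from the repo root.
from PIL import Image
from inference import predict_from_pil

img = Image.new("L", (28, 28), color=0)  # blank 28x28 grayscale image
probs = predict_from_pil(img)
print(probs)  # {"0": ..., "1": ..., ..., "9": ...}, probabilities summing to ~1.0
```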
model.py ADDED
@@ -0,0 +1,17 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ class TinyConv(nn.Module):
+     def __init__(self, num_classes: int = 10):
+         super().__init__()
+         self.conv1 = nn.Conv2d(1, 8, 3, padding=1)   # 1x28x28 -> 8x28x28
+         self.conv2 = nn.Conv2d(8, 16, 3, padding=1)  # 8x28x28 -> 16x28x28
+         self.pool = nn.AdaptiveAvgPool2d((1, 1))     # 16x1x1
+         self.fc = nn.Linear(16, num_classes)
+
+     def forward(self, x):
+         x = F.relu(self.conv1(x))
+         x = F.relu(self.conv2(x))
+         x = self.pool(x).flatten(1)
+         return self.fc(x)
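A small shape check, illustrating the tensor sizes the model expects and returns (not part of the commit):

```python
# Illustrative shape check for TinyConv.
import torch
from model import TinyConv

m = TinyConv().eval()
x = torch.zeros(1, 1, 28, 28)  # batch of one 1-channel 28x28 image
with torch.inference_mode():
    logits = m(x)
print(logits.shape)  # torch.Size([1, 10]): one logit per digit class
```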
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2041ab75c8398326a846e1a5f78a00a1a188e17fe512440ba7985315bff09f6c
+ size 6104
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ torch
+ numpy
+ pillow
+ safetensors
+ gradio  # only needed if you’ll create a Space demo