Fadri commited on
Commit
619cee0
·
verified ·
1 Parent(s): 910d0af

Upload 7 files

Browse files

Image Classification first commit

.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ examples/car.jpg filter=lfs diff=lfs merge=lfs -text
37
+ examples/cat.jpg filter=lfs diff=lfs merge=lfs -text
38
+ examples/ship.jpg filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import pipeline
3
+
4
+ # Load models
5
+ cifar10_classifier = pipeline("image-classification", model="Fadri/results")
6
+ clip_detector = pipeline(model="openai/clip-vit-large-patch14", task="zero-shot-image-classification")
7
+
8
+ # CIFAR-10 Klassen
9
+ labels_cifar10 = [
10
+ 'airplane', 'automobile', 'bird', 'cat', 'deer',
11
+ 'dog', 'frog', 'horse', 'ship', 'truck'
12
+ ]
13
+
14
def classify_image(image):
    """Run both classifiers on *image* and return their label->score maps.

    Parameters
    ----------
    image : str
        Filepath of the uploaded image (Gradio passes ``type="filepath"``).

    Returns
    -------
    dict
        Two entries: scores from the fine-tuned CIFAR-10 ViT model and
        scores from the zero-shot CLIP model over the same ten classes.
    """
    # Fine-tuned ViT model: collect per-label confidence scores.
    vit_scores = {}
    for prediction in cifar10_classifier(image):
        vit_scores[prediction['label']] = prediction['score']

    # Zero-shot CLIP, restricted to the CIFAR-10 candidate labels.
    clip_predictions = clip_detector(image, candidate_labels=labels_cifar10)
    clip_scores = {entry['label']: entry['score'] for entry in clip_predictions}

    return {
        "CIFAR-10 ViT Klassifikation": vit_scores,
        "CLIP Zero-Shot Klassifikation": clip_scores,
    }
# Example images shown below the input widget (paths relative to the repo root).
example_images = [
    ["examples/airplane.jpg"],
    ["examples/car.jpg"],
    ["examples/dog.jpg"],
    ["examples/cat.jpg"],
    ["examples/ship.jpg"],
    ["examples/truck.jpg"],
]

# Gradio interface: one image in, a JSON comparison of both models out.
iface = gr.Interface(
    fn=classify_image,
    inputs=gr.Image(type="filepath"),
    outputs=gr.JSON(),
    title="CIFAR-10 Klassifikation",
    description="Lade ein Bild hoch und vergleiche die Ergebnisse zwischen deinem trainierten ViT Modell und dem Zero-Shot CLIP Modell für CIFAR-10 Klassen.",
    examples=example_images,
)

if __name__ == "__main__":
    # Only start the web server when executed as a script, not on import.
    iface.launch()
examples/airplane.jpg ADDED
examples/car.jpg ADDED

Git LFS Details

  • SHA256: f98ac7818aa611f2d7f45f2867481148eb1caab5bc6b7b54bf25787dad2d147b
  • Pointer size: 131 Bytes
  • Size of remote file: 968 kB
examples/cat.jpg ADDED

Git LFS Details

  • SHA256: 5a31185b51cf359545dc84c29c84f8f19cf643961c26da40eac7f5ddaf3a92cf
  • Pointer size: 131 Bytes
  • Size of remote file: 105 kB
examples/dog.jpg ADDED
examples/ship.jpg ADDED

Git LFS Details

  • SHA256: ef38f48a21d25ca69a54d10bba57328f4c7c1c6c58319af86e99f1fa4865780d
  • Pointer size: 132 Bytes
  • Size of remote file: 1.33 MB
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ gradio
2
+ transformers
3
+ torch
4
+ torchvision