Update app.py

app.py CHANGED
@@ -60,79 +60,6 @@ def rank_images(model_name, images, text):
 
     return ranked_images
 
-# import spaces
-# import gradio as gr
-# import torch
-# torch.jit.script = lambda f: f  # Avoid script error in lambda
-# from t2v_metrics import VQAScore, list_all_vqascore_models
-
-
-# def update_model(model_name):
-#     return VQAScore(model=model_name, device="cuda")
-
-# # Use global variables for model pipe and current model name
-# global model_pipe, cur_model_name
-# cur_model_name = "clip-flant5-xl"
-# model_pipe = update_model(cur_model_name)
-
-
-# # Ensure GPU context manager is imported correctly (assuming spaces is a module you have)
-# # try:
-# #     from spaces import GPU  # i believe this is wrong, spaces package does not have "GPU"
-# # except ImportError:
-# #     GPU = lambda duration: (lambda f: f)  # Dummy decorator if spaces.GPU is not available
-
-# if torch.cuda.is_available():
-#     model_pipe.device = "cuda"
-# else:
-#     print("CUDA is not available")
-
-# @spaces.GPU  # a duration lower than 60 does not work, leave as is.
-# def generate(model_name, image, text):
-#     global model_pipe, cur_model_name
-
-#     if model_name != cur_model_name:
-#         cur_model_name = model_name  # Update the current model name
-#         model_pipe = update_model(model_name)
-
-#     print("Image:", image)  # Debug: Print image path
-#     print("Text:", text)  # Debug: Print text input
-#     print("Using model:", model_name)
-
-#     try:
-#         result = model_pipe(images=[image], texts=[text]).cpu()[0][0].item()  # Perform the model inference
-#         print("Result:", result)
-#     except RuntimeError as e:
-#         print(f"RuntimeError during model inference: {e}")
-#         raise e
-
-#     return result
-
-
-# def rank_images(model_name, images, text):
-#     global model_pipe, cur_model_name
-
-#     if model_name != cur_model_name:
-#         cur_model_name = model_name  # Update the current model name
-#         model_pipe = update_model(model_name)
-
-#     images = [image_tuple[0] for image_tuple in images]
-#     print("Images:", images)  # Debug: Print image paths
-#     print("Text:", text)  # Debug: Print text input
-#     print("Using model:", model_name)
-
-#     try:
-#         results = model_pipe(images=images, texts=[text]).cpu()[:, 0].tolist()  # Perform the model inference on all images
-#         print("Initial results: should be imgs x texts", results)
-#         ranked_results = sorted(zip(images, results), key=lambda x: x[1], reverse=True)  # Rank results
-#         ranked_images = [(img, f"Rank: {rank + 1} - Score: {score:.2f}") for rank, (img, score) in enumerate(ranked_results)]  # Pair images with their scores and rank
-#         print("Ranked Results:", ranked_results)
-#     except RuntimeError as e:
-#         print(f"RuntimeError during model inference: {e}")
-#         raise e
-
-#     return ranked_images
-
 
 ### EXAMPLES ###
 example_imgs = ["0_imgs/DALLE3.png",
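For context: this hunk deletes a commented-out copy of rank_images; the live implementation (named in the hunk header but not itself shown in the diff) presumably mirrors it. The following is a minimal sketch reconstructed from the deleted comments, not the actual file contents. The VQAScore call, the reload-on-dropdown-change logic, and the (filepath, caption) gallery tuples all come from those comments; the CPU fallback for the device is an added assumption.

import torch
from t2v_metrics import VQAScore

cur_model_name = "clip-flant5-xl"
model_pipe = VQAScore(model=cur_model_name,
                      device="cuda" if torch.cuda.is_available() else "cpu")

def rank_images(model_name, images, text):
    """Score every gallery image against one prompt and return them best-first."""
    global model_pipe, cur_model_name
    if model_name != cur_model_name:  # reload only when the dropdown selection changes
        cur_model_name = model_name
        model_pipe = VQAScore(model=model_name, device=model_pipe.device)
    # gr.Gallery hands the function (filepath, caption) tuples; keep the paths
    paths = [image_tuple[0] for image_tuple in images]
    # Per the deleted debug print, the model returns an images x texts score matrix
    scores = model_pipe(images=paths, texts=[text]).cpu()[:, 0].tolist()
    ranked = sorted(zip(paths, scores), key=lambda x: x[1], reverse=True)
    # Caption each image with its rank and score for the output gallery
    return [(img, f"Rank: {rank + 1} - Score: {score:.2f}")
            for rank, (img, score) in enumerate(ranked)]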
@@ -141,65 +68,12 @@ example_imgs = ["0_imgs/DALLE3.png",
                 "0_imgs/SDXL.jpg"]
 example_prompt0 = "Two dogs of different breeds playfully chasing around a tree"
 example_prompt1 = "Two dogs of the same breed playing on the grass"
-###
-
-# # Create the first demo
-# demo_vqascore = gr.Interface(
-#     fn=generate,  # function to call
-#     inputs=[
-#         gr.Dropdown(["clip-flant5-xxl", "clip-flant5-xl", ], label="Model Name"),
-#         gr.Image(type="filepath"),
-#         gr.Textbox(label="Prompt")
-#     ],  # define the types of inputs
-#     examples=[
-#         ["clip-flant5-xl", example_imgs[0], example_prompt0],
-#         ["clip-flant5-xl", example_imgs[0], example_prompt1],
-#     ],
-#     outputs="number",  # define the type of output
-#     title="VQAScore",  # title of the app
-#     description="This model evaluates the similarity between an image and a text prompt."
-# )
 
-# # Create the second demo
-# demo_vqascore_ranking = gr.Interface(
-#     fn=rank_images,  # function to call
-#     inputs=[
-#         gr.Dropdown(["clip-flant5-xl", "clip-flant5-xxl"], label="Model Name"),
-#         gr.Gallery(label="Generated Images"),
-#         gr.Textbox(label="Prompt")
-#     ],  # define the types of inputs
-#     outputs=gr.Gallery(label="Ranked Images"),  # define the type of output
-#     examples=[
-#         ["clip-flant5-xl", [[img, ""] for img in example_imgs], example_prompt0],
-#         ["clip-flant5-xl", [[img, ""] for img in example_imgs], example_prompt1]
-
-#     ],
-#     title="VQAScore Ranking",  # title of the app
-#     description="This model ranks a gallery of images based on their similarity to a text prompt.",
-#     allow_flagging='never'
-# )
 
 # Custom component for loading examples
 def load_example(model_name, images, prompt):
     return model_name, images, prompt
 
-
-# demo_vqascore = gr.Interface(
-#     fn=generate,  # function to call
-#     inputs=[
-#         gr.Dropdown(["clip-flant5-xxl", "clip-flant5-xl", ], label="Model Name"),
-#         gr.Image(type="filepath"),
-#         gr.Textbox(label="Prompt")
-#     ],  # define the types of inputs
-#     examples=[
-#         ["clip-flant5-xl", example_imgs[0], example_prompt0],
-#         ["clip-flant5-xl", example_imgs[0], example_prompt1],
-#     ],
-#     outputs="number",  # define the type of output
-#     title="VQAScore",  # title of the app
-#     description="This model evaluates the similarity between an image and a text prompt."
-# )
-
 # Create the second demo: VQAScore Ranking
 with gr.Blocks() as demo_vqascore_ranking:
     # gr.Markdown("# VQAScore Ranking\nThis model ranks a gallery of images based on their similarity to a text prompt.")
@@ -227,29 +101,6 @@ with gr.Blocks() as demo_vqascore_ranking:
     example1_button.click(fn=lambda: load_example("clip-flant5-xxl", example_imgs, example_prompt0), inputs=[], outputs=[model_dropdown, gallery, prompt])
     example2_button.click(fn=lambda: load_example("clip-flant5-xxl", example_imgs, example_prompt1), inputs=[], outputs=[model_dropdown, gallery, prompt])
 
-# # Create the second demo
-# with gr.Blocks() as demo_vqascore_ranking:
-#     gr.Markdown("# VQAScore Ranking\nThis model ranks a gallery of images based on their similarity to a text prompt.")
-#     model_dropdown = gr.Dropdown(["clip-flant5-xxl", "clip-flant5-xl"], value="clip-flant5-xxl", label="Model Name")
-#     gallery = gr.Gallery(label="Generated Images", elem_id="input-gallery", columns=4, allow_preview=True)
-#     prompt = gr.Textbox(label="Prompt")
-#     rank_button = gr.Button("Rank Images")
-#     ranked_gallery = gr.Gallery(label="Ranked Images with Scores", elem_id="ranked-gallery", columns=4, allow_preview=True)
-
-#     rank_button.click(fn=rank_images, inputs=[model_dropdown, gallery, prompt], outputs=ranked_gallery)
-
-#     # Custom example buttons
-#     example1_button = gr.Button("Load Example 1")
-#     example2_button = gr.Button("Load Example 2")
-
-#     example1_button.click(fn=lambda: load_example("clip-flant5-xxl", example_imgs, example_prompt0), inputs=[], outputs=[model_dropdown, gallery, prompt])
-#     example2_button.click(fn=lambda: load_example("clip-flant5-xxl", example_imgs, example_prompt1), inputs=[], outputs=[model_dropdown, gallery, prompt])
-
-#     # Layout to allow user to input their own data
-#     with gr.Row():
-#         gr.Column([model_dropdown, gallery, prompt, rank_button])
-#         gr.Column([example1_button, example2_button])
-
 # Launch the interface
 demo_vqascore_ranking.queue()
 demo_vqascore_ranking.launch(share=True)
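Taken together, the context lines across the three hunks outline what survives after this commit: a single gr.Blocks app wired to rank_images, plus two example-loader buttons. Below is a minimal sketch of that surviving skeleton; only the .click() wiring, queue(), and launch() lines are visible as context in the diff, so the component definitions are guessed from the deleted commented copy and may differ from the real file (e.g. elem_id arguments and row/column layout are omitted).

import gradio as gr

with gr.Blocks() as demo_vqascore_ranking:
    model_dropdown = gr.Dropdown(["clip-flant5-xxl", "clip-flant5-xl"],
                                 value="clip-flant5-xxl", label="Model Name")
    gallery = gr.Gallery(label="Generated Images", columns=4, allow_preview=True)
    prompt = gr.Textbox(label="Prompt")
    rank_button = gr.Button("Rank Images")
    ranked_gallery = gr.Gallery(label="Ranked Images with Scores", columns=4, allow_preview=True)

    # Main action: score and reorder the gallery against the prompt
    rank_button.click(fn=rank_images, inputs=[model_dropdown, gallery, prompt],
                      outputs=ranked_gallery)

    # Example-loader buttons, wired exactly as in the visible context lines
    example1_button = gr.Button("Load Example 1")
    example2_button = gr.Button("Load Example 2")
    example1_button.click(fn=lambda: load_example("clip-flant5-xxl", example_imgs, example_prompt0),
                          inputs=[], outputs=[model_dropdown, gallery, prompt])
    example2_button.click(fn=lambda: load_example("clip-flant5-xxl", example_imgs, example_prompt1),
                          inputs=[], outputs=[model_dropdown, gallery, prompt])

demo_vqascore_ranking.queue()
demo_vqascore_ranking.launch(share=True)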