Rentolly committed
Commit b316f95 · 1 Parent(s): a5cd866

Add a .gitignore file and refactor app.py, integrating Stable Diffusion and BLIP features

Files changed (2):
  1. .gitignore +1 -0
  2. app.py +174 -145
.gitignore ADDED
@@ -0,0 +1 @@
+ venv/
app.py CHANGED
@@ -1,154 +1,183 @@
- import gradio as gr
- import numpy as np
- import random
-
- # import spaces #[uncomment to use ZeroGPU]
- from diffusers import DiffusionPipeline
  import torch

- device = "cuda" if torch.cuda.is_available() else "cpu"
- model_repo_id = "stabilityai/sdxl-turbo"  # Replace to the model you would like to use
-
- if torch.cuda.is_available():
-     torch_dtype = torch.float16
- else:
-     torch_dtype = torch.float32
-
- pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
- pipe = pipe.to(device)
-
- MAX_SEED = np.iinfo(np.int32).max
- MAX_IMAGE_SIZE = 1024
-
-
- # @spaces.GPU #[uncomment to use ZeroGPU]
- def infer(
-     prompt,
-     negative_prompt,
-     seed,
-     randomize_seed,
-     width,
-     height,
-     guidance_scale,
-     num_inference_steps,
-     progress=gr.Progress(track_tqdm=True),
- ):
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-
-     generator = torch.Generator().manual_seed(seed)
-
-     image = pipe(
-         prompt=prompt,
-         negative_prompt=negative_prompt,
-         guidance_scale=guidance_scale,
-         num_inference_steps=num_inference_steps,
-         width=width,
-         height=height,
-         generator=generator,
-     ).images[0]
-
-     return image, seed
-
-
- examples = [
-     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-     "An astronaut riding a green horse",
-     "A delicious ceviche cheesecake slice",
- ]
-
- css = """
- #col-container {
-     margin: 0 auto;
-     max-width: 640px;
- }
- """
-
- with gr.Blocks(css=css) as demo:
-     with gr.Column(elem_id="col-container"):
-         gr.Markdown(" # Text-to-Image Gradio Template")
-
-         with gr.Row():
-             prompt = gr.Text(
-                 label="Prompt",
-                 show_label=False,
-                 max_lines=1,
-                 placeholder="Enter your prompt",
-                 container=False,
-             )
-
-             run_button = gr.Button("Run", scale=0, variant="primary")
-
-         result = gr.Image(label="Result", show_label=False)
-
-         with gr.Accordion("Advanced Settings", open=False):
-             negative_prompt = gr.Text(
-                 label="Negative prompt",
-                 max_lines=1,
-                 placeholder="Enter a negative prompt",
-                 visible=False,
-             )
-
-             seed = gr.Slider(
-                 label="Seed",
-                 minimum=0,
-                 maximum=MAX_SEED,
-                 step=1,
-                 value=0,
-             )
-
-             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
              with gr.Row():
-                 width = gr.Slider(
-                     label="Width",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=1024,  # Replace with defaults that work for your model
-                 )
-
-                 height = gr.Slider(
-                     label="Height",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=1024,  # Replace with defaults that work for your model
-                 )

              with gr.Row():
-                 guidance_scale = gr.Slider(
-                     label="Guidance scale",
-                     minimum=0.0,
-                     maximum=10.0,
-                     step=0.1,
-                     value=0.0,  # Replace with defaults that work for your model
-                 )
-
-                 num_inference_steps = gr.Slider(
-                     label="Number of inference steps",
-                     minimum=1,
-                     maximum=50,
-                     step=1,
-                     value=2,  # Replace with defaults that work for your model
-                 )
-
-         gr.Examples(examples=examples, inputs=[prompt])
-     gr.on(
-         triggers=[run_button.click, prompt.submit],
-         fn=infer,
-         inputs=[
-             prompt,
-             negative_prompt,
-             seed,
-             randomize_seed,
-             width,
-             height,
-             guidance_scale,
-             num_inference_steps,
          ],
-         outputs=[result, seed],
      )

  if __name__ == "__main__":
-     demo.launch()

+ import os
  import torch
+ import gradio as gr
+ from PIL import Image
+ from diffusers import StableDiffusionPipeline
+ from transformers import BlipProcessor, BlipForConditionalGeneration
+
+ # -----------------------------
+ # Environment and performance settings (CPU-friendly)
+ # -----------------------------
+ os.environ.setdefault("HF_HUB_DISABLE_TELEMETRY", "1")
+ os.environ.setdefault("TOKENIZERS_PARALLELISM", "false")
+
+ # Limit CPU thread count (avoid grabbing too many resources on Spaces)
+ torch.set_num_threads(max(1, min(4, os.cpu_count() or 2)))
+
+ DEVICE = "cpu"  # change to "cuda" if a GPU is available
+ DTYPE = torch.float32
+
+ # -----------------------------
+ # Load models (downloaded automatically on first startup)
+ # -----------------------------
+
+ def load_sd_pipe():
+     pipe = StableDiffusionPipeline.from_pretrained(
+         "runwayml/stable-diffusion-v1-5",
+         torch_dtype=DTYPE,
+         safety_checker=None,  # restore the default safety_checker if classroom content filtering is needed
+     )
+     pipe = pipe.to(DEVICE)
+     # Memory-saving settings (friendly to CPU / low-resource environments)
+     pipe.enable_attention_slicing()
+     pipe.enable_vae_tiling()
+     return pipe
+
+
+ def load_blip():
+     processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+     model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(DEVICE)
+     return processor, model
+
+
+ SD_PIPE = load_sd_pipe()
+ BLIP_PROCESSOR, BLIP_MODEL = load_blip()
+
+
+ # -----------------------------
+ # Feature: Text → Image (Stable Diffusion)
+ # -----------------------------
+
+ def txt2img(prompt: str, neg_prompt: str, steps: int, guidance: float, seed: int | None):
+     if not prompt or prompt.strip() == "":
+         return None, "Please enter a prompt."
+
+     generator = None
+     if seed is not None and seed >= 0:
+         generator = torch.Generator(device=DEVICE).manual_seed(int(seed))
+
+     with torch.inference_mode():
+         image = SD_PIPE(
+             prompt=prompt,
+             negative_prompt=neg_prompt or None,
+             num_inference_steps=int(steps),
+             guidance_scale=float(guidance),
+             generator=generator,
+             height=512,
+             width=512,
+         ).images[0]
+     return image, "Generation finished"
+
+
+ # -----------------------------
+ # Feature: Image → Text (BLIP captioning)
+ # -----------------------------
+
+ def caption_image(image: Image.Image, max_len: int = 50):
+     if image is None:
+         return "Please provide an image first"
+
+     image = image.convert("RGB")
+     inputs = BLIP_PROCESSOR(image, return_tensors="pt").to(DEVICE)
+     with torch.inference_mode():
+         out = BLIP_MODEL.generate(**inputs, max_length=int(max_len))
+     caption = BLIP_PROCESSOR.decode(out[0], skip_special_tokens=True)
+     return caption
+
+
+ # -----------------------------
+ # Feature: one-click chain (Text → Image → Caption)
+ # -----------------------------
+
+ def generate_and_caption(prompt: str, neg_prompt: str, steps: int, guidance: float, seed: int | None, max_len: int):
+     image, _ = txt2img(prompt, neg_prompt, steps, guidance, seed)
+     if image is None:
+         return None, ""
+     cap = caption_image(image, max_len=max_len)
+     return image, cap
+
+
+ # -----------------------------
+ # Gradio interface
+ # -----------------------------
+ with gr.Blocks(title="Stable Diffusion + BLIP", theme=gr.themes.Soft()) as demo:
+     gr.Markdown(
+         """
+         # 🖼️ Stable Diffusion + 📝 BLIP
+         A simple vision-language demo: **text-to-image** and **image captioning**.\
+         Running on CPU can be slow; lowering the step count (e.g. 15) is recommended.
+         """
+     )

+     with gr.Tabs():
+         # Tab 1: Text -> Image
+         with gr.TabItem("Text → Image"):
              with gr.Row():
+                 with gr.Column(scale=1):
+                     prompt = gr.Textbox(label="Prompt", placeholder="a cozy cat reading a book by the window, warm lighting")
+                     neg_prompt = gr.Textbox(label="Negative Prompt", placeholder="blurry, low quality, watermark", value="")
+                     steps = gr.Slider(5, 50, value=15, step=1, label="Steps (higher is slower)")
+                     guidance = gr.Slider(1.0, 12.0, value=7.5, step=0.5, label="Guidance Scale")
+                     seed = gr.Number(label="Seed (same settings reproduce the result; -1 means random)", value=-1, precision=0)
+                     run_btn = gr.Button("🚀 Generate Image", variant="primary")
+                 with gr.Column(scale=1):
+                     out_img = gr.Image(label="Generated Image", format="png")
+                     status = gr.Markdown()
+
+             # gr.Progress is injected as a default argument (it is not a context manager)
+             def _run_txt2img(prompt, neg_prompt, steps, guidance, seed, progress=gr.Progress(track_tqdm=True)):
+                 progress(0, desc="Preparing model…")
+                 image, msg = txt2img(prompt, neg_prompt, steps, guidance, int(seed) if seed is not None else None)
+                 progress(1, desc="Done")
+                 return image, msg
+
+             run_btn.click(_run_txt2img, [prompt, neg_prompt, steps, guidance, seed], [out_img, status])
+
+         # Tab 2: Image -> Caption
+         with gr.TabItem("Image → Caption"):
+             with gr.Row():
+                 with gr.Column(scale=1):
+                     in_img = gr.Image(label="Upload Image", type="pil")
+                     max_len = gr.Slider(10, 100, value=50, step=5, label="Caption length limit")
+                     cap_btn = gr.Button("📝 Generate Caption")
+                 with gr.Column(scale=1):
+                     caption_out = gr.Textbox(label="BLIP Caption")
+
+             cap_btn.click(caption_image, [in_img, max_len], [caption_out])

+         # Tab 3: one-click chain
+         with gr.TabItem("Generate → Caption"):
              with gr.Row():
+                 with gr.Column(scale=1):
+                     g_prompt = gr.Textbox(label="Prompt", placeholder="a cozy cat reading a book by the window, warm lighting")
+                     g_neg_prompt = gr.Textbox(label="Negative Prompt", value="")
+                     g_steps = gr.Slider(5, 50, value=15, step=1, label="Steps")
+                     g_guidance = gr.Slider(1.0, 12.0, value=7.5, step=0.5, label="Guidance Scale")
+                     g_seed = gr.Number(label="Seed (-1 means random)", value=-1, precision=0)
+                     g_max_len = gr.Slider(10, 100, value=50, step=5, label="Caption length limit")
+                     g_btn = gr.Button("✨ Generate + Caption", variant="primary")
+                 with gr.Column(scale=1):
+                     g_img = gr.Image(label="Generated Image", format="png")
+                     g_cap = gr.Textbox(label="BLIP Caption")
+
+             def _run_generate_and_caption(prompt, neg_prompt, steps, guidance, seed, max_len, progress=gr.Progress(track_tqdm=True)):
+                 progress(0, desc="Generating image…")
+                 image, caption = generate_and_caption(prompt, neg_prompt, steps, guidance, int(seed) if seed is not None else None, int(max_len))
+                 progress(1, desc="Done")
+                 return image, caption
+
+             g_btn.click(_run_generate_and_caption, [g_prompt, g_neg_prompt, g_steps, g_guidance, g_seed, g_max_len], [g_img, g_cap])
+
+     gr.Examples(
+         examples=[
+             ["a cozy cat reading a book by the window, warm lighting", "", 15, 7.5, 42],
+             ["cinematic portrait of an astronaut in a forest, film grain, bokeh", "", 20, 8.0, 1234],
          ],
+         inputs=[prompt, neg_prompt, steps, guidance, seed],
+         label="Prompt examples (click to use)",
      )

  if __name__ == "__main__":
+     # On HF Spaces there is no need for demo.launch(share=True)
+     demo.launch()
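
For a quick local sanity check of the refactored module, the two core functions can be exercised without the Gradio UI. The snippet below is a hypothetical smoke test, not part of this commit; it assumes the dependencies (torch, gradio, diffusers, transformers, pillow) are installed and accepts that importing app loads both models up front:

    # smoke_test.py (hypothetical helper, not included in this commit)
    from app import txt2img, caption_image

    # Generate a 512x512 image with a fixed seed, then caption it with BLIP.
    image, status = txt2img("a red bicycle leaning on a brick wall", "", steps=15, guidance=7.5, seed=42)
    print(status)  # expected: "Generation finished"
    if image is not None:
        image.save("sample.png")
        print(caption_image(image, max_len=30))

Because demo.launch() is guarded by the __main__ check, importing app for a test like this does not start the web server.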