MaxMilan1 committed
Commit · 35a268c · 1 Parent(s): 36be636
changeeeeeeeeeeeeeeeeeeeeeeees
Browse files:
- app.py +1 -1
- util/text_img.py +6 -6
app.py
CHANGED
@@ -28,7 +28,7 @@ with gr.Blocks(theme=theme) as GenDemo:
     with gr.Row(variant="panel"):
         with gr.Column():
             prompt = gr.Textbox(label="Enter a discription of a shoe")
-            image = gr.Image(label="Enter an image of a shoe, that you want to use as a reference"
+            image = gr.Image(label="Enter an image of a shoe, that you want to use as a reference")
             strength = gr.Slider(label="Strength", minimum=0.1, maximum=1.0, value=0.5, step=0.1)
             gr.Examples(
                 examples=[
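For context, the app.py change only adds the closing parenthesis that the old gr.Image(...) line was missing; without it, the following gr.Slider assignment sits inside the unfinished call and the file fails to parse. A minimal sketch of the corrected layout, assuming only the Blocks/Row/Column structure visible in the hunk:

```python
# Minimal sketch of the corrected layout (context assumed from the hunk, not the full app.py).
import gradio as gr

theme = gr.themes.Default()  # assumption: any Gradio theme object

with gr.Blocks(theme=theme) as GenDemo:
    with gr.Row(variant="panel"):
        with gr.Column():
            prompt = gr.Textbox(label="Enter a discription of a shoe")
            # The fix: gr.Image(...) now closes its parenthesis, so the slider below
            # is a separate statement instead of a stray token inside the call.
            image = gr.Image(label="Enter an image of a shoe, that you want to use as a reference")
            strength = gr.Slider(label="Strength", minimum=0.1, maximum=1.0, value=0.5, step=0.1)
```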
util/text_img.py
CHANGED
@@ -14,10 +14,6 @@ import gradio as gr
 def check_prompt(prompt):
     if prompt is None:
         raise gr.Error("Please enter a prompt!")
-
-imagepipe = AutoPipelineForImage2Image.from_pretrained(
-    "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-)
 
 controlNet_normal = ControlNetModel.from_pretrained(
     "fusing/stable-diffusion-v1-5-controlnet-normal",
@@ -58,10 +54,14 @@ def generate_txttoimg(prompt, control_image, controlnet):
     return image2
 
 @spaces.GPU
-def generate_imgtoimg(prompt,
+def generate_imgtoimg(prompt, init_image, strength=0.5):
     prompt += ", no background, side view, minimalist shot, single shoe, no legs, product photo"
 
-
+    imagepipe = AutoPipelineForImage2Image.from_pretrained(
+        "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
+    )
+
+    image = imagepipe(prompt, image=init_image, strength=strength).images[0]
 
     image2 = rembg.remove(image)
 
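Taken together, the util/text_img.py changes move the SDXL refiner img2img pipeline from module import time into the @spaces.GPU-decorated function and give generate_imgtoimg an explicit init_image and strength parameter. A hedged sketch of the resulting function; the .to("cuda") placement, the return statement, and the Button wiring in the trailing comment are assumptions for illustration, not lines taken from the diff:

```python
# Sketch of the reworked img2img path; .to("cuda"), the return value, and the
# click() wiring below are assumptions, not lines from the commit.
import torch
import rembg
import spaces
from diffusers import AutoPipelineForImage2Image

@spaces.GPU
def generate_imgtoimg(prompt, init_image, strength=0.5):
    prompt += ", no background, side view, minimalist shot, single shoe, no legs, product photo"

    # The pipeline is now built inside the GPU-decorated call instead of at import,
    # so model loading happens only once a ZeroGPU device has actually been allocated.
    imagepipe = AutoPipelineForImage2Image.from_pretrained(
        "stabilityai/stable-diffusion-xl-refiner-1.0",
        torch_dtype=torch.float16, variant="fp16", use_safetensors=True,
    ).to("cuda")  # assumption: the pipeline is moved to the GPU here or elsewhere in the file

    image = imagepipe(prompt, image=init_image, strength=strength).images[0]
    image2 = rembg.remove(image)  # drop the background for a clean product shot
    return image2  # assumption: the cut-out image is returned, mirroring generate_txttoimg

# Hypothetical wiring in app.py, matching the widgets added in the other hunk:
# generate_button.click(generate_imgtoimg, inputs=[prompt, image, strength], outputs=result)
```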