merge CLIPSeg demo
Changed files:
- __pycache__/share_btn.cpython-38.pyc: ADDED (binary file, 6.99 kB, +0 -0)
- app.py: CHANGED (+8 -4)

app.py
@@ -1,6 +1,7 @@
 import os
 import torch
 import gradio as gr
+import numpy as np
 from PIL import Image
 import matplotlib.pyplot as plt
 from diffusers import DiffusionPipeline
@@ -38,7 +39,7 @@ def read_content(file_path):
     return content
 
 
-def predict(dict, reference, scale, seed, step):
+def predict(dict, text_query, reference, scale, seed, step):
     width, height = dict["image"].size
     if width < height:
         factor = width / 512.0
@@ -52,6 +53,8 @@ def predict(dict, reference, scale, seed, step):
 
     init_image = dict["image"].convert("RGB").resize((width, height))
     mask = dict["mask"].convert("RGB").resize((width, height))
+    print(np.array(mask))
+    print(text_query)
     generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
     output = pipe(
         image=init_image,
@@ -119,8 +122,9 @@ with image_blocks as demo:
     with gr.Box():
         with gr.Row():
             with gr.Column():
-                image = gr.Image(source=
-
+                image = gr.Image(source="upload", tool="sketch", elem_id="image_upload", type="pil", label="Source Image")
+                text = gr.Textbox(lines=1, placeholder="Clothing item you want to replace...")
+                reference = gr.Image(source="upload", elem_id="image_upload", type="pil", label="Reference Image")
 
             with gr.Column():
                 image_out = gr.Image(label="Output", elem_id="output-img").style(height=400)
@@ -146,7 +150,7 @@ with image_blocks as demo:
         with gr.Column():
             gr.Examples(ref_list, inputs=[reference],label="Examples - Reference Image",examples_per_page=12)
 
-    btn.click(fn=predict, inputs=[image, reference, guidance, seed, steps], outputs=[image_out, community_icon, loading_icon, share_button])
+    btn.click(fn=predict, inputs=[image, text, reference, guidance, seed, steps], outputs=[image_out, community_icon, loading_icon, share_button])
     share_button.click(None, [], [], _js=share_js)
 
     gr.HTML(
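Note: the commit title references CLIPSeg, but the diff itself only threads text_query through to a debug print (print(np.array(mask)) dumps the user-drawn mask as an (H, W, 3) uint8 array). A minimal sketch of how the query could eventually drive mask generation with CLIPSeg via transformers follows; the checkpoint name, threshold, and helper name are assumptions, not part of this commit.

    # Sketch only: text-driven mask generation with CLIPSeg.
    # Checkpoint and threshold are assumptions, not part of this commit.
    import numpy as np
    import torch
    from PIL import Image
    from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation

    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

    def clipseg_mask(image, text_query, threshold=0.4):
        """Return a binary RGB mask of `image` regions matching `text_query`."""
        inputs = processor(text=[text_query], images=[image], return_tensors="pt")
        with torch.no_grad():
            logits = model(**inputs).logits        # low-resolution relevance map
        probs = torch.sigmoid(logits.squeeze()).numpy()
        binary = (probs > threshold).astype(np.uint8) * 255
        return Image.fromarray(binary).resize(image.size).convert("RGB")

    # Inside predict(), such a mask could stand in for the user-drawn one:
    # mask = clipseg_mask(dict["image"].convert("RGB"), text_query).resize((width, height))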