Update app.py
app.py CHANGED
@@ -1,6 +1,7 @@
 import gradio as gr
 import spaces
 import torch
+import subprocess
 
 zero = torch.Tensor([0]).cuda()
 print(zero.device) # <-- 'cpu' 🤗
@@ -10,11 +11,19 @@ def greet(n):
     print(zero.device) # <-- 'cuda:0' 🤗
     return f"Hello {zero + n} Tensor"
 
-def
-
-
+def run_infrence(input_video,input_audio):
+    audio = "sample_data/sir.mp3"
+    video = "sample_data/spark_input.mp4"
+    command = f'python3 inference.py --checkpoint_path checkpoints/wav2lip_gan.pth --face "{video}" --audio "{audio}"'
+    print("running ")
+    # Execute the command
+    process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
+
+    # Get the output
+    output, error = process.communicate()
+
+    return output
 
-
 def run():
     with gr.Blocks(css=".gradio-container {background-color: lightgray} #radio_div {background-color: #FFD8B4; font-size: 40px;}") as demo:
         gr.Markdown("<h1 style='text-align: center;'>"+ "One Shot Talking Face from Text" + "</h1><br/><br/>")
@@ -22,24 +31,16 @@ def run():
         # with gr.Box():
         with gr.Row():
             # with gr.Row().style(equal_height=True):
-
-
-
-            video_out = gr.Video(show_label=True,label="Output")
+            input_video = gr.Video(label="Input Video")
+            input_audio = gr.Audio(label="Input Audio")
+            video_out = gr.Video(show_label=True,label="Output")
         with gr.Row():
-
-            btn = gr.Button("Generate")
-        # gr.Markdown(
-        # """
-        # <p style='text-align: center;'>Feel free to give us your thoughts on this demo and please contact us at
-        # <a href="mailto:[email protected]" target="_blank">[email protected]</a>
-        # <p style='text-align: center;'>Developed by: <a href="https://www.pragnakalp.com" target="_blank">Pragnakalp Techlabs</a></p>
+            btn = gr.Button("Generate")
 
-
-
-        btn.click(one_shot, inputs=[image_in,input_text,gender], outputs=[video_out])
+        btn.click(run_infrence,inputs=[input_video,input_audio], outputs=[video_out])
         demo.queue()
         demo.launch(server_name="0.0.0.0", server_port=7860)
 
+
 if __name__ == "__main__":
-    run()
+    run()
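
As committed, run_infrence (spelling as in the commit) has two rough edges: it ignores the uploaded input_video and input_audio in favor of the hard-coded sample files, and it returns the subprocess's raw stdout bytes, which the gr.Video output component cannot render. A minimal sketch of a corrected handler follows. It assumes that Wav2Lip's inference.py writes its result to results/result_voice.mp4 (that script's usual default) and that the audio component is declared as gr.Audio(label="Input Audio", type="filepath") so the handler receives a path rather than a (sample_rate, data) tuple:

import subprocess

def run_infrence(input_video, input_audio):
    # Fall back to the bundled samples when nothing is uploaded.
    video = input_video or "sample_data/spark_input.mp4"
    audio = input_audio or "sample_data/sir.mp3"
    # An argument list avoids shell=True and any quoting trouble with the
    # temp-file paths Gradio passes in.
    subprocess.run(
        ["python3", "inference.py",
         "--checkpoint_path", "checkpoints/wav2lip_gan.pth",
         "--face", video,
         "--audio", audio],
        check=True,  # raise if inference.py exits non-zero
    )
    # gr.Video displays a file path, not raw process output; this is
    # Wav2Lip's default output location (an assumption worth verifying).
    return "results/result_voice.mp4"

Wired up exactly as in the commit, btn.click(run_infrence,inputs=[input_video,input_audio], outputs=[video_out]) would then show the generated clip instead of an empty player.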
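
The 'cpu'/'cuda:0' comments in the unchanged lines come from the Hugging Face ZeroGPU starter: on a ZeroGPU Space, a GPU is attached only while a function decorated with @spaces.GPU is executing, so a tensor created with .cuda() at import time still reports cpu. For reference, a sketch of that surrounding template; the @spaces.GPU decorator on greet falls outside the diff's context lines and is assumed here, as in the standard starter:

import spaces
import torch

# No GPU is attached at import time on ZeroGPU, so this prints 'cpu'
# even though the tensor was created with .cuda().
zero = torch.Tensor([0]).cuda()
print(zero.device)  # <-- 'cpu'

@spaces.GPU  # assumed: a GPU is attached only for the duration of each call
def greet(n):
    print(zero.device)  # <-- 'cuda:0' inside the decorated function
    return f"Hello {zero + n} Tensor"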