Merge branch 'main' of https://huggingface.co/spaces/TeamTonic/MultiMed
app.py CHANGED
@@ -10,6 +10,7 @@ import json
 import dotenv
 from scipy.io.wavfile import write
 import PIL
+from openai import OpenAI
 dotenv.load_dotenv()
 
 client = Client("facebook/seamless_m4t")
@@ -232,7 +233,19 @@ def process_and_query(text=None, image=None, audio=None):
         # Now, use the text (either provided by the user or obtained from OpenAI) to query Vectara
         vectara_response_json = query_vectara(text)
         markdown_output = convert_to_markdown(vectara_response_json)
-
+        client = OpenAI()
+        prompt ="Answer in the same language, write it better, more understandable and shorter:"
+        markdown_output_final = markdown_output
+
+        completion = client.chat.completions.create(
+            model="gpt-3.5-turbo",
+            messages=[
+                {"role": "system", "content": prompt},
+                {"role": "user", "content": markdown_output_final}
+            ]
+        )
+        final_response= completion.choices[0].message.content
+        return final_response
     except Exception as e:
         return str(e)
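For reference, the added block post-processes the Vectara answer with GPT-3.5 before it is returned to the UI. Below is a minimal sketch of that step in isolation; it assumes OPENAI_API_KEY is available in the environment (app.py loads it via dotenv) and uses a placeholder string in place of the real convert_to_markdown() output.

import dotenv
from openai import OpenAI

dotenv.load_dotenv()  # pulls OPENAI_API_KEY from a local .env file, as app.py does

# Placeholder standing in for convert_to_markdown(query_vectara(text))
markdown_output = "**Sources:** three passages returned by Vectara."

client = OpenAI()  # reads OPENAI_API_KEY from the environment
completion = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "Answer in the same language, write it better, more understandable and shorter:"},
        {"role": "user", "content": markdown_output},
    ],
)
print(completion.choices[0].message.content)  # the rewritten summary shown to the user

Note that the commit reuses the name client for the OpenAI instance inside process_and_query, shadowing the module-level gradio Client("facebook/seamless_m4t"); a distinct name such as openai_client would keep the two apart.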