import os
import requests
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
import numpy as np
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from dotenv import load_dotenv

# Read the Groq API key from a local .env file.
load_dotenv()
GROQ_API_KEY = os.getenv("GROQ_API_KEY")

app = FastAPI()

# Allow any origin so a browser front end can call the API during development.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load the women's-health conversation dataset and flatten every turn into a list of texts.
dataset = load_dataset("altaidevorg/women-health-mini")

conversation_data = [
    turn["content"]
    for conv in dataset["train"]
    for turn in conv["conversations"]
]

# Embed the whole corpus once at startup; normalized embeddings let a plain
# dot product act as cosine similarity.
model = SentenceTransformer("all-MiniLM-L6-v2", device="cpu")

conversation_embeddings = model.encode(conversation_data, normalize_embeddings=True)

GROQ_API_URL = "https://api.groq.com/openai/v1/chat/completions"


def get_most_relevant_response(query):
    """Return the corpus entry whose embedding is most similar to the query."""
    query_embedding = model.encode([query], normalize_embeddings=True)
    similarities = np.dot(conversation_embeddings, query_embedding.T).flatten()
    best_match_idx = int(np.argmax(similarities))
    return conversation_data[best_match_idx]

class ChatRequest(BaseModel):
    message: str = ""


@app.post("/chat/")
async def chat_with_bot(user_query: ChatRequest):
    prompt = user_query.message

    if not prompt:
        return {"response": "Prompt is required!"}

    # Retrieve the closest question/answer from the embedded corpus to ground the reply.
    history_response = get_most_relevant_response(prompt)

    context_prompt = f"""
You are a chatbot named fille AI specialized in women's health. Provide **clear, factual, and supportive** responses.
If the user's question involves medical advice, remind them to consult a healthcare professional.

User Question: {prompt}

Below is a similar question/response from the knowledge base that may contain helpful information:
{history_response}

Please provide your own professional, friendly, and informative response that addresses the user's specific question.
"""
    
    headers = {
        "Authorization": f"Bearer {GROQ_API_KEY}",
        "Content-Type": "application/json"
    }

    payload = {
        "model": "llama3-70b-8192",
        "messages": [{"role": "user", "content": context_prompt}]
    }

    # Forward the augmented prompt to the Groq chat-completions endpoint.
    response = requests.post(GROQ_API_URL, json=payload, headers=headers, timeout=30)

    if response.status_code == 200:
        bot_reply = response.json()["choices"][0]["message"]["content"]
        return {"response": bot_reply}

    return {"response": "Error fetching a response from the Groq API."}

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)
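
# Example request (illustrative; assumes the server above is running locally on port 8000,
# and the question text is only a placeholder):
#
#   curl -X POST http://localhost:8000/chat/ \
#        -H "Content-Type: application/json" \
#        -d '{"message": "What can cause irregular periods?"}'
#
# The endpoint responds with JSON of the form {"response": "..."}.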