import string

import gradio as gr
import torch
from deep_translator import GoogleTranslator
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline

def preprocess_data(text: str) -> str:
    """Lowercase the text, strip punctuation, and trim surrounding whitespace."""
    return text.lower().translate(str.maketrans("", "", string.punctuation)).strip()
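
# Example: preprocess_data(" Hello, World!! ") -> "hello world"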

SARCASTIC_MODEL_PATH = "helinivan/english-sarcasm-detector"
SENTIMENT_MODEL_PATH = "lxyuan/distilbert-base-multilingual-cased-sentiments-student"

# Sarcasm detector: load the tokenizer and model directly so the logits can be inspected.
sarcasm_tokenizer = AutoTokenizer.from_pretrained(SARCASTIC_MODEL_PATH)
sarcasm_model = AutoModelForSequenceClassification.from_pretrained(SARCASTIC_MODEL_PATH)

# Sentiment analyzer: return_all_scores=True is deprecated in recent transformers
# releases; top_k=None is the current way to get scores for every class.
sentiment_analyzer = pipeline("text-classification", model=SENTIMENT_MODEL_PATH, top_k=None)
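
# Illustrative output for a list input (scores made up; the model's labels are
# positive/neutral/negative):
# sentiment_analyzer(["great movie"])[0]
# -> [{"label": "positive", "score": 0.97},
#     {"label": "neutral", "score": 0.02},
#     {"label": "negative", "score": 0.01}]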

def analyze_text(user_input):
    # The sarcasm detector is English-only, so translate the (Tamil) input first.
    translated_text = GoogleTranslator(source="auto", target="en").translate(user_input)
    preprocessed_text = preprocess_data(translated_text)

    # Run sarcasm detection on the cleaned English text.
    tokenized_text = sarcasm_tokenizer(
        [preprocessed_text], padding=True, truncation=True, max_length=256, return_tensors="pt"
    )
    with torch.no_grad():
        output = sarcasm_model(**tokenized_text)
    probs = torch.nn.functional.softmax(output.logits, dim=-1).tolist()[0]
    sarcasm_confidence = max(probs)
    # Index of the highest probability: 0 = not sarcastic, 1 = sarcastic.
    is_sarcastic = probs.index(sarcasm_confidence)

    if is_sarcastic:
        return "Sarcastic"

    # Not sarcastic: report the dominant sentiment instead. A list input keeps the
    # output shape (a list of per-class score dicts) stable across transformers versions.
    sentiment_scores = sentiment_analyzer([translated_text])[0]
    sentiment_result = max(sentiment_scores, key=lambda x: x["score"])
    return sentiment_result["label"].capitalize()

iface = gr.Interface(
    fn=analyze_text,
    inputs=gr.Textbox(label="Enter your text (Tamil)"),
    outputs=gr.Textbox(label="Analysis Result"),
    description="Enter text in TAMIL",
    examples=[
        # Rough translation: "Hi Malini, I have to say this: you are so beautiful,
        # no one here would have seen anyone more beautiful than you."
        ["ஹாய் மாலினி, நான் இதை சொல்லியே ஆகணும், நீ அவ்ளோ அழகு, இங்க உன்னைவிட ஒரு அழகா யாரும் பாத்துருக்க மாட்டாங்க"],
        # Rough translation: "This is not good."
        ["இது நல்ல இல்ல"],
        # Rough translation: "We won, machan!"
        ["நம்ம ஜெயிச்சிட்டோம் மாறா!"],
    ],
)
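
# Optional smoke test (a sketch: assumes network access for GoogleTranslator and
# that both Hugging Face models can be downloaded or are already cached):
# print(analyze_text("இது நல்ல இல்ல"))  # expected: a sentiment label such as "Negative"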

if __name__ == "__main__":
    # share=True also creates a temporary public Gradio link alongside the local server.
    iface.launch(share=True)