# app.py
import gradio as gr
from models import (
    load_sentiment_model, 
    load_summarization_model, 
    load_translation_model,
    load_question_answering_model,
    load_text_generation_model,
    load_ner_model,
    load_text_classification_model
    # Removed: load_text_to_sql_model
)
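
# models.py is not shown in this file; a minimal sketch of what each loader is
# assumed to wrap (plain transformers pipelines, default checkpoints):
#
#   from transformers import pipeline
#   def load_sentiment_model():
#       return pipeline("sentiment-analysis")
#   def load_ner_model():
#       # aggregation_strategy="simple" is assumed here, because process_text
#       # below reads entity["entity_group"], which only exists when token
#       # predictions are aggregated into entity spans
#       return pipeline("ner", aggregation_strategy="simple")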

# Title and Description for your app
TITLE = "🧠 Eternal Equation AI Processor (- = +)"
DESCRIPTION = """
**The Eternal Equation: (- = +)**  
A conceptual framework made real. This tool demonstrates how simple interfaces can harness complex AI.
**🎯 Purpose:**  
To make powerful AI models accessible, understandable, and usable for everyone through a clean, intuitive interface based on the universal pattern of Input -> Process -> Output.
**⚡ Current Capabilities:**  
• **Sentiment Analysis:** Determine the emotional tone (Positive/Negative) of any text.  
• **Text Summarization:** Condense long articles, reports, or paragraphs into concise summaries.  
• **Translation:** Convert English text to French.  
• **Question Answering:** Extract answers to questions from a supplied context passage.  
• **Text Generation:** Continue a prompt with newly generated text.  
• **Named Entity Recognition:** Identify people, organizations, locations, and other entities.  
• **Zero-Shot Classification:** Categorize text against labels you supply, with no task-specific training.  
**⚠️ Important Limitations:**  
• **Summarization Length:** Works best with texts between **50 and 500 words**. Longer texts are automatically truncated.  
• **Processing Speed:** Hosted on free-tier CPU hardware. Processing may take 10-30 seconds.  
• **Beta Stage:** This is a live demo and proof of concept. Outputs may occasionally be imperfect.  
**🔮 The Future:** More modes for code, images, and audio are coming soon.
"""

# Load all models at startup (The Orchestra assembles)
sentiment_pipeline = load_sentiment_model()
summarize_pipeline = load_summarization_model()
translate_pipeline = load_translation_model()
qa_pipeline = load_question_answering_model()
textgen_pipeline = load_text_generation_model()
ner_pipeline = load_ner_model()
classify_pipeline = load_text_classification_model()
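
# Note: loading all seven pipelines eagerly makes cold starts slow and
# memory-heavy on free-tier CPU hardware; loading each pipeline lazily on
# first use is a common alternative trade-off.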

def process_text(input_text, mode, context=None, candidate_labels=None):
    """The Conductor: routes input to the correct specialist model."""
    # Check for empty input FIRST
    if not input_text.strip():
        return "โš ๏ธ Please enter some text to process."
    
    # Handle Sentiment Analysis
    if mode == "Sentiment Analysis":
        try:
            result = sentiment_pipeline(input_text)
            return f"๐ŸŽฏ **Label:** {result[0]['label']}\n\n๐Ÿ”ฎ **Confidence:** {result[0]['score']:.4f}"
        except Exception as e:
            return f"โŒ Error in sentiment analysis: {str(e)}"
    
    # Handle Text Summarization
    elif mode == "Text Summarization":
        # Check word count BEFORE even trying the model
        word_list = input_text.split()
        word_count = len(word_list)
        
        if word_count < 50:
            return "๐Ÿ“ **Please provide a longer text for summarization (at least 50 words).** This model is designed for articles and paragraphs."
        
        # Smart Truncation for long texts
        max_word_limit = 500
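        # Word-count truncation is a rough proxy for the model's real token
        # limit (commonly ~1024 tokens for summarization checkpoints); it keeps
        # very long inputs from erroring out in the pipeline.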
        if word_count > max_word_limit:
            truncated_text = " ".join(word_list[:max_word_limit])
            warning_msg = f"โš ๏ธ Note: Your text was very long ({word_count} words). Summarized the first {max_word_limit} words.\n\n"
        else:
            truncated_text = input_text
            warning_msg = ""
        
        try:
            result = summarize_pipeline(truncated_text, max_length=130, min_length=30, do_sample=False)
            return f"{warning_msg}๐Ÿ“„ **Summary:**\n\n{result[0]['summary_text']}"
        except Exception as e:
            return f"โŒ The summarization model failed. Please try a different text.\n(Error: {str(e)})"

    # Handle Translation
    elif mode == "Translation (EN to FR)":
        try:
            result = translate_pipeline(input_text)
            return f"๐Ÿ‡ซ๐Ÿ‡ท **Translation:**\n\n{result[0]['translation_text']}"
        except Exception as e:
            return f"โŒ Translation error: {str(e)}"
            
    # Handle Question Answering
    elif mode == "Question Answering":
        if not context or not context.strip():
            return "โš ๏ธ For question answering, please provide both a question and context text."
        
        try:
            result = qa_pipeline(question=input_text, context=context)
            return f"โ“ **Question:** {input_text}\n\n๐Ÿ“š **Answer:** {result['answer']}\n\n๐ŸŽฏ **Confidence:** {result['score']:.4f}"
        except Exception as e:
            return f"โŒ Question answering error: {str(e)}"
            
    # Handle Text Generation
    elif mode == "Text Generation":
        try:
            # Cap generation length to keep processing times short on CPU;
            # max_new_tokens bounds only the generated continuation, whereas
            # max_length would also count the prompt tokens
            result = textgen_pipeline(input_text, max_new_tokens=100, do_sample=True, temperature=0.7)
            return f"🎨 **Generated Text:**\n\n{result[0]['generated_text']}"
        except Exception as e:
            return f"❌ Text generation error: {str(e)}"
            
    # Handle Named Entity Recognition
    elif mode == "Named Entity Recognition":
        try:
            result = ner_pipeline(input_text)
            if not result:
                return "๐Ÿ” No named entities found in the text."
            
            formatted_result = "๐Ÿท๏ธ **Named Entities:**\n\n"
            for entity in result:
                formatted_result += f"- {entity['word']} ({entity['entity_group']}, confidence: {entity['score']:.4f})\n"
            
            return formatted_result
        except Exception as e:
            return f"โŒ NER error: {str(e)}"
            
    # Handle Zero-Shot Classification
    elif mode == "Zero-Shot Classification":
        if not candidate_labels or not candidate_labels.strip():
            return "โš ๏ธ For zero-shot classification, please provide candidate labels (comma-separated)."
        
        try:
            labels = [label.strip() for label in candidate_labels.split(",")]
            result = classify_pipeline(input_text, candidate_labels=labels)
            
            formatted_result = "๐Ÿ“Š **Classification Results:**\n\n"
            for label, score in zip(result['labels'], result['scores']):
                formatted_result += f"- {label}: {score:.4f}\n"
            
            return formatted_result
        except Exception as e:
            return f"โŒ Classification error: {str(e)}"
            
    # Handle any other mode that might be added in the future
    else:
        return "Selected mode is not yet implemented."

# Create the Gradio Interface (The Frontend UI)
with gr.Blocks(title=TITLE, css=".gradio-container {max-width: 800px; margin: auto;}") as demo:
    gr.Markdown(f"# {TITLE}")
    gr.Markdown(DESCRIPTION)
    
    with gr.Row():
        with gr.Column(scale=1):
            input_text = gr.Textbox(label="➖ Input Text", placeholder="Paste your text here...", lines=5)
            mode = gr.Radio(
                choices=[
                    "Sentiment Analysis",
                    "Text Summarization",
                    "Translation (EN to FR)",
                    "Question Answering",
                    "Text Generation",
                    "Named Entity Recognition",
                    "Zero-Shot Classification"
                ], 
                label="๐ŸŸฐ Processing Mode", 
                value="Sentiment Analysis"
            )
            
            # Conditional inputs based on mode
            context = gr.Textbox(
                label="๐Ÿ“š Context (for Question Answering)",
                placeholder="Paste the context text here...",
                lines=3,
                visible=False
            )
            
            candidate_labels = gr.Textbox(
                label="๐Ÿท๏ธ Candidate Labels (comma-separated, for Zero-Shot Classification)",
                placeholder="e.g., politics, sports, technology, science",
                visible=False
            )
            
            # Show/hide additional inputs based on mode selection
            def toggle_additional_inputs(selected_mode):
                return {
                    context: gr.update(visible=selected_mode == "Question Answering"),
                    candidate_labels: gr.update(visible=selected_mode == "Zero-Shot Classification")
                }
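            # Returning a dict keyed by component lets one handler update
            # multiple outputs selectively; gr.update(visible=...) changes only
            # visibility and leaves any text the user has typed intact.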
            
            mode.change(toggle_additional_inputs, inputs=mode, outputs=[context, candidate_labels])
            
            submit_btn = gr.Button("Process", variant="primary")
            
        with gr.Column(scale=1):
            output_text = gr.Textbox(label="➕ AI Output", lines=8)
    
    # Examples
    gr.Examples(
        examples=[
            ["I am absolutely thrilled with this product! It's everything I hoped for and more.", "Sentiment Analysis", None, None],
            ["""The field of artificial intelligence (AI) has seen unprecedented growth in the last decade. Breakthroughs in machine learning, particularly deep learning, have driven advancements in areas from computer vision to natural language processing. Companies across all sectors are investing heavily in AI research and implementation, hoping to gain a competitive edge. This rapid expansion has also sparked important debates about ethics, bias in algorithms, and the future of work. While the potential benefits are vast, experts urge for careful consideration of the societal impacts to ensure the technology is developed and used responsibly.""", "Text Summarization", None, None],
            ["Hello, world! How are you today?", "Translation (EN to FR)", None, None],
            ["What is the capital of France?", "Question Answering", "France is a country in Europe. Paris is the capital city of France.", None],
            ["Once upon a time in a land far, far away", "Text Generation", None, None],
            ["Apple Inc. was founded by Steve Jobs in Cupertino, California.", "Named Entity Recognition", None, None],
            ["The new smartphone has a great camera and long battery life", "Zero-Shot Classification", None, "technology, photography, travel"]
        ],
        inputs=[input_text, mode, context, candidate_labels],
        outputs=output_text,
        fn=process_text,
        cache_examples=False
    )
    
    submit_btn.click(
        fn=process_text,
        inputs=[input_text, mode, context, candidate_labels],
        outputs=output_text
    )

# Launch the app
if __name__ == "__main__":
    demo.launch()
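
# To run locally (assuming the usual dependencies):
#   pip install gradio transformers torch
#   python app.py
# Gradio serves the UI at http://127.0.0.1:7860 by default.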