Spaces: Build error
https://github.com/kjgpta/Vibe-Fusion.git
#1 opened by kjgpta
- .env +0 -11
- .gitattributes +0 -2
- .gitignore +0 -4
- Dockerfile +0 -11
- LICENSE +0 -21
- README copy.md +0 -502
- VibeFusion-DesignSpec.pdf +0 -3
- create_catalog.py +0 -182
- design.png +0 -3
- env.example +0 -10
- image.png +0 -0
- recommendation_system.py +0 -349
- requirements.txt +0 -12
- run.py +0 -145
- streamlit_app.py +0 -342
- test_system.py +0 -165
- vercel.json +0 -10
.env
DELETED
@@ -1,11 +0,0 @@
-# OpenAI API Configuration
-openai_api_key='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'
-
-
-# App Configuration
-app_debug=False
-similarity_threshold=0.8
-
-# Data Configuration
-catalog_file=data/Apparels_shared.xlsx
-vibes_data_dir=data/vibes/
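For reference, a minimal sketch of how settings like the ones in this deleted file are typically consumed, assuming the `python-dotenv` package (the app's actual loading code is not part of this diff); the variable names mirror the file above:

```python
# Hypothetical loader sketch; assumes python-dotenv is installed (not confirmed by this diff).
import os
from dotenv import load_dotenv

load_dotenv()  # reads key=value pairs from a local .env file into the process environment

openai_api_key = os.getenv("openai_api_key")  # secret; keep out of version control
similarity_threshold = float(os.getenv("similarity_threshold", "0.8"))
catalog_file = os.getenv("catalog_file", "data/Apparels_shared.xlsx")
vibes_data_dir = os.getenv("vibes_data_dir", "data/vibes/")
```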
.gitattributes
CHANGED
@@ -33,5 +33,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
-design.png filter=lfs diff=lfs merge=lfs -text
-VibeFusion-DesignSpec.pdf filter=lfs diff=lfs merge=lfs -text
.gitignore
DELETED
@@ -1,4 +0,0 @@
-venv/
-.env
-__pycache__/
-nltk_data/
Dockerfile
DELETED
@@ -1,11 +0,0 @@
-# Dockerfile
-FROM python:3.9-slim
-
-WORKDIR /app
-COPY requirements.txt .
-
-RUN pip install --no-cache-dir -r requirements.txt
-
-COPY . .
-
-CMD ["python", "run.py"]
LICENSE
DELETED
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2025 Kshitij Gupta
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
README copy.md
DELETED
@@ -1,502 +0,0 @@
-# 🌟 Vibe Fusion
-
-AI-powered fashion recommendation system that transforms natural language style descriptions into personalized outfit suggestions. The system combines rule-based matching, semantic similarity, and GPT-powered inference to understand user "vibes" and translate them into actionable product recommendations.
-
-## ✨ Key Features & Innovations
-
-### 🧠 **Hybrid Intelligence Architecture**
-- **Rule-based + AI hybrid**: Combines fast, reliable rule-based matching with intelligent GPT fallback
-- **Confidence-aware processing**: Automatically determines when to use GPT vs rule-based matching
-- **Semantic understanding**: Uses spaCy word vectors and TF-IDF for contextual phrase matching
-- **Knowledge base approach**: Curated vibe-to-attribute JSON mappings for fashion domain expertise
-
-### 💬 **Conversational Interface**
-- **Multi-turn conversations**: Maintains context across multiple user interactions
-- **Smart follow-up questions**: Automatically asks for missing critical attributes
-- **Natural language processing**: Handles complex queries like "something elegant for a dinner date"
-- **Context preservation**: Remembers user preferences and partial requirements throughout the session
-- **Intelligent state management**: Tracks conversation history, pending attributes, and user preferences
-- **Progressive refinement**: Each interaction builds upon previous context for better recommendations
-- **Graceful conversation reset**: Users can start fresh while preserving useful context
-
-### 🎯 **Advanced Query Understanding**
-- **Compound phrase recognition**: Understands "summer brunch" as a season + occasion combination
-- **Fashion-specific NLP**: Specialized entity extraction for clothing categories, fits, occasions
-- **Fuzzy matching**: "comfy" automatically maps to "comfortable" and "relaxed"
-- **Multi-modal attributes**: Processes style, season, occasion, fit, and color simultaneously
-
-### 📊 **Intelligent Product Filtering**
-- **Multi-criteria search**: Filters by category, price, size, fit, fabric, color, occasion
-- **Category-aware validation**: Different attributes are relevant for tops vs dresses vs pants
-- **Flexible size handling**: Supports both traditional (S/M/L) and numeric sizing
-- **Smart price context**: Provides value context based on user budget
-
-### 🎨 **Design & User Experience**
-- **Intuitive conversational interface**: Natural chat-based interaction with visual feedback
-- **Progressive disclosure**: Information revealed step-by-step to avoid overwhelming users
-- **Visual product showcase**: Rich product displays with images and detailed descriptions
-- **Responsive design**: Optimized for desktop, tablet, and mobile experiences
-- **Accessibility features**: Screen reader support, keyboard navigation, high contrast options
-
-## 🏗️ **System Architecture & Design Decisions**
-
-### **Modular Pipeline Design**
-
-The system follows a pipeline architecture that processes user queries through multiple specialized stages:
-
-```
-User Query → NLP Analysis → Similarity Matching → GPT Inference → Product Filtering → NLG Response
-```
-
-### **1. NLP Analyzer (`modules/nlp_analyzer.py`)**
-
-**Design Decision**: Hybrid spaCy + NLTK approach
-- **Why chosen**: spaCy provides excellent tokenization and entity recognition, while NLTK offers robust stopword filtering
-- **Benefits over alternatives**: More accurate than pure regex, faster than full transformer models
-- **Fashion-specific patterns**: Curated lists of occasions, styles, fits, colors specific to the fashion domain
-- **Compound phrase detection**: Recognizes multi-word fashion concepts like "summer brunch"
-
-**Key Features**:
-- Fashion-specific entity patterns for occasions, seasons, styles, categories, fits
-- Intelligent text cleaning that preserves fashion terminology
-- Confidence scoring for extracted attributes
-- Budget extraction with currency normalization
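As an illustration of the budget-extraction step described above, a minimal, hypothetical sketch (the real `nlp_analyzer.py` is not shown in this diff; the example phrases come from this README):

```python
# Hypothetical budget-extraction sketch; the actual NLPAnalyzer is not part of this diff.
import re
from typing import Optional

def extract_budget(query: str) -> Optional[float]:
    """Pull a dollar budget out of phrases like '$50', 'under $100', or '200 dollars'."""
    match = re.search(r"\$\s*(\d+(?:\.\d+)?)|(\d+(?:\.\d+)?)\s*dollars", query, re.IGNORECASE)
    if not match:
        return None
    return float(match.group(1) or match.group(2))

print(extract_budget("Comfortable wide leg pants under $60"))  # 60.0
print(extract_budget("around 200 dollars"))                    # 200.0
```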
-
-### **2. Similarity Matcher (`modules/similarity_matcher.py`)**
-
-**Design Decision**: Dual similarity approach (spaCy + TF-IDF)
-- **Why chosen**: spaCy provides semantic understanding, TF-IDF provides robust string matching
-- **Benefits**: Handles both exact matches ("bodycon") and semantic matches ("form-fitting" → "bodycon")
-- **Knowledge base approach**: JSON mappings allow easy updates without retraining models
-
-**Vibe-to-Attribute Mappings**:
-- `fit_mapping.json`: 20+ fit descriptors → standardized fit attributes
-- `color_mapping.json`: Color families and style descriptors → specific color palettes
-- `occasion_mapping.json`: 25+ complex occasion scenarios with full attribute sets
-- `fabric_mapping.json`: Fabric types and characteristics
-
-**Confidence-based Processing**:
-- Similarity threshold (default 0.8) determines when to trust rule-based matches
-- High confidence matches skip GPT inference for speed
-- Low confidence triggers GPT fallback for better coverage
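To make the confidence-based routing above concrete, here is a minimal sketch using scikit-learn's TF-IDF and cosine similarity; the 0.8 threshold comes from this README, the sample vibe keys are illustrative, and the real `similarity_matcher.py` is not included in this diff:

```python
# Hypothetical similarity-routing sketch; assumes scikit-learn is installed.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

vibe_keys = ["comfy", "bodycon", "romantic dinner date", "summer brunch"]  # sample keys
threshold = 0.8  # rule-based confidence threshold from the README

def best_match(phrase: str):
    vectorizer = TfidfVectorizer().fit(vibe_keys + [phrase])
    key_vecs = vectorizer.transform(vibe_keys)
    phrase_vec = vectorizer.transform([phrase])
    scores = cosine_similarity(phrase_vec, key_vecs)[0]
    best = scores.argmax()
    # High-confidence matches are used directly; low scores would trigger the GPT fallback.
    return (vibe_keys[best], float(scores[best])) if scores[best] >= threshold else None

print(best_match("romantic dinner date"))  # exact key -> high confidence match
print(best_match("boho chic vibes"))       # no close key -> None, defer to GPT
```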
-
-### **3. GPT Inference (`modules/gpt_inference.py`)**
-
-**Design Decision**: GPT-4 as intelligent fallback, not primary engine
-- **Why chosen**: GPT provides broad coverage but is slower and requires API calls
-- **Benefits**: Handles edge cases and complex queries that rule-based matching misses
-- **Structured output**: Enforces JSON schema with validation against known valid values
-- **Graceful degradation**: System works without GPT, just with reduced functionality
-
-**Smart Prompting Strategy**:
-- Category-aware attribute validation (tops have different attributes than dresses)
-- Extensive valid value lists prevent hallucination
-- Context injection from existing rule-based matches
-- Temperature 0.3 for consistent, focused responses
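The repo's `gpt_inference.py` is not shown in this diff; as a hedged sketch of the prompting strategy described above (constrained value lists, temperature 0.3, JSON-only output), assuming the official `openai` Python client and an `OPENAI_API_KEY` in the environment:

```python
# Hypothetical fallback sketch; not the repository's actual gpt_inference.py.
import json
from openai import OpenAI

VALID_FITS = ["Relaxed", "Tailored", "Body hugging", "Oversized"]  # illustrative value list

def infer_attributes(user_query: str) -> dict:
    client = OpenAI()  # reads OPENAI_API_KEY from the environment
    prompt = (
        "Map this clothing request to JSON attributes. "
        f"Allowed 'fit' values: {VALID_FITS}. Respond with JSON only.\n"
        f"Request: {user_query}"
    )
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.3,  # low temperature for consistent, focused output
    )
    try:
        return json.loads(response.choices[0].message.content)
    except (json.JSONDecodeError, TypeError):
        return {}  # graceful degradation: fall back to rule-based attributes
```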
-
-### **4. Catalog Filter (`modules/catalog_filter.py`)**
-
-**Design Decision**: Pandas-based flexible filtering with category awareness
-- **Why chosen**: Fast filtering on structured data, easy to extend with new criteria
-- **Benefits**: Supports complex multi-criteria searches with partial matching
-- **Category-specific logic**: Different filtering rules for tops, dresses, skirts, pants
-
-**Advanced Filtering Features**:
-- **Color family matching**: "pastels" expands to multiple specific colors
-- **Size compatibility**: Flexible size matching across different size systems
-- **Fabric lists**: Handles multiple fabric preferences (["silk", "satin"])
-- **Price context awareness**: Sorts and prioritizes based on budget constraints
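A minimal sketch of the kind of multi-criteria filtering described above; the column names follow `create_catalog.py` further down in this diff, while the real `catalog_filter.py` is not shown:

```python
# Hypothetical filtering sketch; schema taken from create_catalog.py in this diff.
import pandas as pd

def filter_products(catalog: pd.DataFrame, category=None, size=None, max_price=None, max_results=5):
    result = catalog
    if category:
        result = result[result["Category"].str.lower() == category.lower()]
    if size:
        # Available_Sizes is a comma-separated string such as "XS,S,M,L,XL"
        result = result[result["Available_Sizes"].str.split(",").apply(lambda sizes: size in sizes)]
    if max_price is not None:
        result = result[result["Price"] <= max_price]
    return result.sort_values("Price").head(max_results)

catalog = pd.read_excel("data/Apparels_shared.xlsx")
print(filter_products(catalog, category="Dress", size="M", max_price=100))
```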
-
-### **5. NLG Generator (`modules/nlg_generator.py`)**
-
-**Design Decision**: Template-based generation with contextual adaptation
-- **Why chosen**: More reliable and controllable than pure generative models
-- **Benefits**: Consistent tone, factual accuracy, domain-appropriate language
-- **Context-aware**: Different response styles for single vs multiple products
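In the spirit of the template-based generation described above, a small, hypothetical sketch (the real `nlg_generator.py` is not included in this diff; template placeholders follow the customization example later in this README):

```python
# Hypothetical template-based response sketch; not the repository's NLGGenerator.
import random

TEMPLATES = [
    "Perfect! I found {name} for {occasion}. {description} It's ${price}.",
    "How about {name}? {description} At ${price} it suits a {occasion} look nicely.",
]

def generate_suggestion(product: dict, occasion: str) -> str:
    template = random.choice(TEMPLATES)
    return template.format(
        name=product["Name"],
        description=product["Description"],
        price=product["Price"],
        occasion=occasion,
    )

product = {"Name": "Pastel Pink Blouse", "Description": "Soft pink blouse perfect for office wear.", "Price": 40}
print(generate_suggestion(product, "business casual"))
```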
-
-## 🎯 **Why This Architecture? Benefits Over Alternatives**
-
-### **Hybrid Approach vs Pure AI/Pure Rules**
-- **Pure rule-based systems**: Limited to predefined patterns, can't handle edge cases
-- **Pure AI systems**: Expensive, unpredictable, require constant fine-tuning
-- **Our hybrid approach**: Fast rule-based for common cases + AI for complexity = best of both worlds
-
-### **Knowledge Base vs End-to-End Learning**
-- **End-to-end neural models**: Require massive training data, black box decisions
-- **Our curated knowledge base**: Transparent, easily updatable, leverages human expertise
-- **Benefits**: Explainable recommendations, easy maintenance, domain expert friendly
-
-### **Modular vs Monolithic Design**
-- **Monolithic systems**: Hard to debug, upgrade, or customize
-- **Our modular pipeline**: Each component can be independently improved or replaced
-- **Benefits**: Easier testing, flexible deployment, clear separation of concerns
-
-### **Conversational vs Form-Based Interface**
-- **Traditional forms**: Limited expression, rigid interaction patterns
-- **Our conversational UI**: Natural expression, progressive refinement, context awareness
-- **Benefits**: Better user experience, handles ambiguity, learns user preferences
-
-## 🎨 **Design Documentation**
-
-Vibe Fusion features a carefully crafted user experience designed to make fashion discovery intuitive and enjoyable.
-
-### **Visual Design System**
-
-
-The application follows modern design principles with:
-- **Clean, minimalist interface** that puts content first
-- **Warm, approachable color palette** that reflects fashion and style
-- **Typography hierarchy** that guides users through conversations
-- **Consistent iconography** for actions and status indicators
-
-### **Complete Design Specification**
-📋 [VibeFusion Design Specification](VibeFusion-DesignSpec.pdf)
-
-Our comprehensive design document covers:
-- **User Experience Strategy**: Conversational flow design and interaction patterns
-- **Visual Identity**: Color schemes, typography, iconography, and branding guidelines
-- **Component Library**: Reusable UI components and design tokens
-- **Responsive Layouts**: Multi-device experience optimization
-- **Accessibility Guidelines**: Inclusive design practices and WCAG compliance
-- **Usability Testing Results**: User feedback integration and iterative improvements
-
-### **Key Design Principles**
-
-**1. Conversation-First Design**
-- Natural language input prioritized over complex forms
-- Visual feedback for each step in the recommendation process
-- Context preservation through visual conversation history
-
-**2. Progressive Enhancement**
-- Core functionality works without JavaScript
-- Enhanced interactions for modern browsers
-- Graceful degradation for accessibility devices
-
-**3. Fashion-Forward Aesthetics**
-- Visual design reflects current fashion trends
-- Product imagery and styling considerations
-- Color psychology applied to enhance user engagement
-
-**4. Performance-Optimized**
-- Fast loading times with optimized assets
-- Lazy loading for product images and recommendations
-- Efficient state management for smooth interactions
-
-## 🚀 **Quick Start & Setup**
-
-### **One-Command Launch**
-```bash
-python3 run.py
-```
-
-This automated setup script:
-1. ✅ Validates Python 3.8+ compatibility
-2. 📦 Installs all dependencies from `requirements.txt`
-3. 🤖 Downloads spaCy English model (`en_core_web_md`)
-4. 📊 Generates sample product catalog with realistic fashion data
-5. 🔧 Checks environment configuration and provides guidance
-6. 🚀 Launches Streamlit web application on `localhost:8501`
-
-### **Manual Setup (Alternative)**
-```bash
-# Install dependencies
-pip install -r requirements.txt
-
-# Download required spaCy model
-python -m spacy download en_core_web_md
-
-# Create sample catalog
-python create_catalog.py
-
-# Configure environment (optional)
-cp env.example .env
-# Edit .env with your OpenAI API key
-
-# Start application
-python run.py
-```
-
-## 🔧 **Configuration & Customization**
-
-### **Environment Variables**
-```bash
-# OpenAI API (enables GPT inference)
-openai_api_key=your_openai_api_key_here
-
-# System tuning
-similarity_threshold=0.8    # Rule-based confidence threshold
-streamlit_server_port=8501  # Web interface port
-```
-
-### **Data Configuration**
-
-**Product Catalog** (`data/Apparels_shared.xlsx`):
-- Structured product data with category-specific attributes
-- Flexible schema supporting different clothing types
-- Price, size, and availability information
-
-**Vibe Knowledge Base** (`data/vibes/`):
-- **`fit_mapping.json`**: Maps casual language ("comfy", "snug") to standard fits
-- **`color_mapping.json`**: Maps color families ("pastels", "earth tones") to specific colors
-- **`occasion_mapping.json`**: Complex scenario mappings ("romantic dinner date" → full attribute set)
-- **`fabric_mapping.json`**: Fabric characteristics and style associations
-
-**Knowledge Base Structure Example**:
-```json
-// fit_mapping.json - Simple term-to-attribute mapping
-{
-  "comfy": {"fit": "Relaxed"},
-  "bodycon": {"fit": "Body hugging"},
-  "oversized": {"fit": "Oversized"}
-}
-
-// occasion_mapping.json - Complex scenario mapping
-{
-  "romantic dinner date": {
-    "fabric": ["Silk", "Satin", "Chiffon"],
-    "neckline": ["V neck", "Sweetheart", "Cowl neck"],
-    "occasion": "Evening",
-    "fit": "Body hugging"
-  }
-}
-```
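For illustration, a small sketch of how mapping files like the ones above could be loaded and queried; the file names come from this README, while the actual loading code is not part of this diff:

```python
# Hypothetical knowledge-base loader sketch; file names are taken from the README above.
import json
from pathlib import Path

def load_vibe_mappings(vibes_dir: str = "data/vibes/") -> dict:
    mappings = {}
    for name in ["fit_mapping.json", "color_mapping.json", "occasion_mapping.json", "fabric_mapping.json"]:
        path = Path(vibes_dir) / name
        if path.exists():
            mappings.update(json.loads(path.read_text(encoding="utf-8")))
    return mappings

mappings = load_vibe_mappings()
print(mappings.get("comfy"))                 # {'fit': 'Relaxed'}, as in the example above
print(mappings.get("romantic dinner date"))  # full attribute set, as in the example above
```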
-
-**Catalog Schema Design**:
-- **Category-specific attributes**: Tops have sleeve_length, dresses have neckline, etc.
-- **Flexible sizing**: Supports both traditional (XS-XXL) and numeric sizes
-- **Color standardization**: Consistent color naming across products
-- **Price and availability**: Real-time inventory considerations
-
-## 📋 **Usage Examples & Capabilities**
-
-### **Conversational Interface Examples**
-
-The Vibe Fusion interface presents conversations in an intuitive chat format with visual cues, product images, and interactive elements. See our [design documentation](#-design-documentation) for complete UI/UX details.
-
-**Simple Request**:
-```
-User: "Something casual for a summer brunch"
-System: "Perfect! I found a light pink linen top that would be ideal for summer brunch.
-Relaxed fit with short sleeves. It's $40 and would work beautifully for casual occasions."
-```
-
-**Complex Multi-turn Conversation**:
-```
-User: "I need something elegant for a dinner date"
-System: "I'd love to help you find something elegant! What size do you prefer, and do you have a budget in mind?"
-
-User: "Size M, around $100"
-System: "Got it! Do you prefer a dress, or would you like separates like a top and skirt?"
-
-User: "A dress would be perfect"
-System: "Wonderful! I found a beautiful sapphire blue silk dress in size M. Body-hugging fit
-with a V-neck, perfect for evening occasions. At $95, it's a great value and ideal for dinner dates."
-```
-
-### **Advanced Query Understanding**
-
-The system understands complex, nuanced requests:
-
-- **"Boho chic vibes"** → Linen/chiffon fabrics, bell sleeves, relaxed fits
-- **"Office-ready polish"** → Tailored fits, cotton fabrics, collar necklines
-- **"Party ready sequin pieces"** → Sequin/metallic colors, bodycon fits
-- **"Minimalist aesthetic"** → Neutral colors, tailored fits, clean lines
-- **"Comfortable wide leg pants under $60"** → Specific category, fit, price filtering
-
-### **Smart Attribute Inference**
-
-- **Compound phrases**: "Summer brunch" = casual occasion + light fabrics + appropriate colors
-- **Style implications**: "Elegant" suggests tailored fits, premium fabrics, sophisticated colors
-- **Seasonal context**: "Winter formal" implies longer sleeves, darker colors, structured fits
-- **Budget optimization**: Prioritizes products within stated budget, provides value context
-
-## 🎯 **System Capabilities & Intelligence**
-
-### **Natural Language Understanding**
-- **Occasions**: brunch, work, party, date, wedding, gym, vacation, interview
-- **Styles**: casual, formal, elegant, edgy, bohemian, chic, minimalist, romantic
-- **Seasons**: summer, winter, spring, fall + seasonal implications
-- **Fits**: relaxed, tailored, bodycon, oversized, slim, flowy + casual descriptors
-- **Colors**: Complex color families, specific shades, pattern recognition
-- **Categories**: dress, top, blazer, jeans, pants, skirt, activewear + subcategories
-
-### **Smart Processing Features**
-- **Context preservation**: Maintains conversation state across multiple turns
-- **Confidence-based routing**: Automatically chooses optimal processing strategy
-- **Graceful degradation**: Functions without GPT API, provides useful feedback on missing components
-- **Real-time validation**: Checks user preferences against available inventory
-- **Flexible sizing**: Handles multiple size systems and conversions
-
-## 📁 **Project Structure & Technical Details**
-
-```
-VibeApparelRecommender/
-├── 📜 run.py                      # Automated setup and launch script
-├── 🌐 streamlit_app.py            # Conversational web interface
-├── 📊 create_catalog.py           # Sample data generator with realistic products
-├── 🧪 test_system.py              # Comprehensive system validation tests
-├── 📋 requirements.txt            # Python dependencies with version management
-├── 🔧 env.example                 # Environment configuration template
-├── 📖 README.md                   # This comprehensive documentation
-├── 🎨 design.png                  # Visual design system overview
-├── 📋 VibeFusion-DesignSpec.pdf   # Complete design specification document
-├── 📄 LICENSE                     # MIT license file
-├── 📁 recommendation_system.py    # Main coordinator and pipeline orchestrator
-├── 📁 modules/                    # Modular system components
-│   ├── nlp_analyzer.py            # spaCy + NLTK natural language processing
-│   ├── similarity_matcher.py      # Cosine similarity + semantic matching
-│   ├── gpt_inference.py           # OpenAI GPT-4 intelligent fallback
-│   ├── catalog_filter.py          # Multi-criteria product filtering
-│   └── nlg_generator.py           # Template-based response generation
-└── 📁 data/                       # Knowledge base and catalog data
-    ├── Apparels_shared.xlsx       # Structured product catalog
-    └── vibes/                     # Fashion domain knowledge
-        ├── fit_mapping.json       # Fit terminology → standard attributes
-        ├── color_mapping.json     # Color families → specific colors
-        ├── occasion_mapping.json  # Complex scenarios → full attribute sets
-        └── fabric_mapping.json    # Fabric characteristics and associations
-```
-
-## 🛠️ **Development & Extension**
-
-### **Adding New Products**
-```python
-# Edit create_catalog.py to add more products
-catalog_data.append({
-    "Product_ID": "NEW001",
-    "Name": "Your New Product",
-    "Category": "Dress",
-    "Price": 75,
-    "Available_Sizes": "XS,S,M,L,XL",
-    "Fit": "Relaxed",
-    "Color": "Blue",
-    "Fabric": "Linen",
-    # ... other category-specific attributes
-})
-```
-
-### **Extending Vibe Knowledge Base**
-```json
-// Add to data/vibes/occasion_mapping.json
-{
-  "weekend farmers market": {
-    "category": "top",
-    "fit": "Relaxed",
-    "fabric": ["Cotton", "Linen"],
-    "sleeve_length": "Short sleeves",
-    "color_or_print": ["Earth tones", "Natural"]
-  }
-}
-```
-
-### **Customizing Response Generation**
-```python
-# Modify modules/nlg_generator.py
-self.templates['single_product'].append(
-    "Perfect match! {description} would be amazing for {context}. {details} Only ${price}!"
-)
-```
-
-## 🔍 **Testing & Validation**
-
-### **Automated Testing Suite**
-```bash
-python test_system.py
-```
-
-**Test Coverage**:
-- ✅ File structure and dependency validation
-- ✅ JSON knowledge base loading and validation
-- ✅ Product catalog structure and content verification
-- ✅ System initialization and module integration
-- ✅ Basic recommendation pipeline functionality
-
-### **System Status Monitoring**
-The Streamlit interface provides real-time system status:
-- spaCy model availability and version
-- GPT API connectivity and quotas
-- Knowledge base loading status
-- Product catalog statistics and health
-
-### **Troubleshooting Common Issues**
-
-**spaCy Model Issues**:
-```bash
-# Download missing model
-python -m spacy download en_core_web_md
-
-# Verify installation
-python -c "import spacy; nlp = spacy.load('en_core_web_md'); print('✅ spaCy model loaded')"
-```
-
-**OpenAI API Issues**:
-- Ensure API key is valid and has sufficient credits
-- System gracefully degrades to rule-based matching without GPT
-- Check rate limits if experiencing delays
-
-**Import/Path Issues**:
-- Run from project root directory
-- Ensure virtual environment is activated if used
-- Check that all dependencies are installed: `pip install -r requirements.txt`
-
-## 🚀 **Performance & Scalability**
-
-### **Processing Speed**
-- **Rule-based matching**: ~10-50ms per query
-- **With GPT fallback**: ~1-3 seconds (network dependent)
-- **Catalog filtering**: ~1-10ms for 1000+ products
-- **Memory footprint**: ~100-200MB (including spaCy model)
-
-### **Scalability Considerations**
-- **Horizontal scaling**: Stateless design allows multiple instances
-- **Caching opportunities**: Vibe mappings, product catalog, spaCy models
-- **Database integration**: Easy migration from Excel to SQL/NoSQL
-- **API rate limiting**: Built-in GPT usage optimization
-
-### **Quality Assurance**
-- **Fallback mechanisms**: Multiple processing strategies ensure robustness
-- **Validation layers**: Input sanitization, output verification, error handling
-- **Confidence scoring**: Transparent quality metrics for recommendations
-- **User feedback integration**: Framework for continuous improvement
-
-## 🤝 **Contributing & Customization**
-
-### **Architecture Benefits**
-- **Modular design**: Easy to replace or enhance individual components
-- **Clear interfaces**: Well-defined APIs between modules
-- **Configuration-driven**: Behavior modification without code changes
-- **Extensible knowledge base**: JSON format allows non-technical updates
-- **Multi-modal input**: Text, preferences, historical data integration ready
-
-### **Extension Points**
-1. **New data sources**: Additional product catalogs, user preference databases
-2. **Enhanced NLP**: Sentiment analysis, brand preferences, style evolution
-3. **Recommendation algorithms**: Collaborative filtering, trend analysis
-4. **User personalization**: Purchase history, style profiles, size preferences
-5. **Business logic**: Inventory management, seasonal recommendations, promotions
-
-## 📄 **License & Acknowledgments**
-
-**License**: MIT License - see [LICENSE](LICENSE) file for details
-
-**Technologies & Dependencies**:
-- **spaCy** (3.4+): Advanced natural language processing and semantic similarity
-- **OpenAI GPT-4**: Intelligent attribute inference and complex query handling
-- **Streamlit** (1.28+): Interactive web interface with real-time updates
-- **Pandas** (1.5+): Efficient data manipulation and filtering
-- **scikit-learn** (1.2+): TF-IDF vectorization and cosine similarity
-- **NLTK** (3.8+): Text preprocessing and stopword filtering
-
-**Design Philosophy**:
-Vibe Fusion embodies a "human-AI collaboration" approach where AI augments human domain expertise rather than replacing it. The curated knowledge base captures fashion expertise, while AI handles the complexity of natural language understanding and edge cases.
-
-**User Experience Philosophy**:
-- **Conversation over forms**: Natural language interaction feels more personal and intuitive
-- **Progressive disclosure**: Information is revealed gradually to prevent cognitive overload
-- **Visual storytelling**: Product recommendations are presented with rich context and imagery
-- **Inclusive design**: Accessibility and usability considerations for diverse user needs
-- **Performance first**: Fast, responsive interactions that respect user time and attention
-
----
-
-**🌟 Built with intelligence and style** | Transform your fashion ideas into perfect recommendations!
VibeFusion-DesignSpec.pdf
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5e6ff96de3f9065d4d26ace64c18bf13f3b58730581039c2aaf0d18b4fb317f4
-size 542404
create_catalog.py
DELETED
@@ -1,182 +0,0 @@
-import pandas as pd
-import os
-
-# Create sample product catalog data
-catalog_data = [
-    {
-        "Product_ID": "D001",
-        "Name": "Rosemary Floral Sundress",
-        "Category": "Dress",
-        "Price": 85,
-        "Available_Sizes": "XS,S,M,L,XL",
-        "Fit": "Relaxed",
-        "Color": "Pink",
-        "Pattern": "Floral",
-        "Sleeve_Length": "Sleeveless",
-        "Neckline": "V-neck",
-        "Length": "Knee-length",
-        "Occasion": "Casual",
-        "Season": "Summer",
-        "Brand": "SunnyDays",
-        "Description": "Light and airy floral sundress perfect for summer occasions"
-    },
-    {
-        "Product_ID": "D002",
-        "Name": "Daisy Print Midi Dress",
-        "Category": "Dress",
-        "Price": 90,
-        "Available_Sizes": "S,M,L,XL",
-        "Fit": "Relaxed",
-        "Color": "Yellow",
-        "Pattern": "Floral",
-        "Sleeve_Length": "Sleeveless",
-        "Neckline": "Round neck",
-        "Length": "Midi",
-        "Occasion": "Casual",
-        "Season": "Summer",
-        "Brand": "FloralFashion",
-        "Description": "Cheerful daisy print midi dress with comfortable fit"
-    },
-    {
-        "Product_ID": "B001",
-        "Name": "Classic Navy Blazer",
-        "Category": "Blazer",
-        "Price": 120,
-        "Available_Sizes": "XS,S,M,L,XL,XXL",
-        "Fit": "Tailored",
-        "Color": "Navy",
-        "Pattern": "Solid",
-        "Sleeve_Length": "Long sleeve",
-        "Neckline": "Collared",
-        "Length": "Hip-length",
-        "Occasion": "Formal",
-        "Season": "All",
-        "Brand": "Professional",
-        "Description": "Timeless navy blazer for professional settings"
-    },
-    {
-        "Product_ID": "T001",
-        "Name": "Casual Cotton T-Shirt",
-        "Category": "Top",
-        "Price": 25,
-        "Available_Sizes": "XS,S,M,L,XL,XXL",
-        "Fit": "Relaxed",
-        "Color": "White",
-        "Pattern": "Solid",
-        "Sleeve_Length": "Short sleeve",
-        "Neckline": "Round neck",
-        "Length": "Hip-length",
-        "Occasion": "Casual",
-        "Season": "All",
-        "Brand": "BasicWear",
-        "Description": "Comfortable cotton t-shirt for everyday wear"
-    },
-    {
-        "Product_ID": "D003",
-        "Name": "Elegant Black Evening Dress",
-        "Category": "Dress",
-        "Price": 150,
-        "Available_Sizes": "XS,S,M,L,XL",
-        "Fit": "Bodycon",
-        "Color": "Black",
-        "Pattern": "Solid",
-        "Sleeve_Length": "Sleeveless",
-        "Neckline": "V-neck",
-        "Length": "Maxi",
-        "Occasion": "Formal",
-        "Season": "All",
-        "Brand": "ElegantEvening",
-        "Description": "Sophisticated black dress perfect for formal events"
-    },
-    {
-        "Product_ID": "J001",
-        "Name": "High-Waisted Skinny Jeans",
-        "Category": "Jeans",
-        "Price": 65,
-        "Available_Sizes": "24,26,28,30,32,34",
-        "Fit": "Slim",
-        "Color": "Blue",
-        "Pattern": "Solid",
-        "Sleeve_Length": "N/A",
-        "Neckline": "N/A",
-        "Length": "Full-length",
-        "Occasion": "Casual",
-        "Season": "All",
-        "Brand": "DenimCo",
-        "Description": "Classic high-waisted skinny jeans in dark wash"
-    },
-    {
-        "Product_ID": "S001",
-        "Name": "Flowy Maxi Skirt",
-        "Category": "Skirt",
-        "Price": 45,
-        "Available_Sizes": "XS,S,M,L,XL",
-        "Fit": "Relaxed",
-        "Color": "Mint Green",
-        "Pattern": "Solid",
-        "Sleeve_Length": "N/A",
-        "Neckline": "N/A",
-        "Length": "Maxi",
-        "Occasion": "Casual",
-        "Season": "Summer",
-        "Brand": "FlowingFashion",
-        "Description": "Comfortable maxi skirt with elastic waistband"
-    },
-    {
-        "Product_ID": "A001",
-        "Name": "Athletic Yoga Set",
-        "Category": "Activewear",
-        "Price": 55,
-        "Available_Sizes": "XS,S,M,L,XL",
-        "Fit": "Athletic",
-        "Color": "Black",
-        "Pattern": "Solid",
-        "Sleeve_Length": "Long sleeve",
-        "Neckline": "Round neck",
-        "Length": "Full-length",
-        "Occasion": "Workout",
-        "Season": "All",
-        "Brand": "FitLife",
-        "Description": "High-performance yoga set with moisture-wicking fabric"
-    },
-    {
-        "Product_ID": "D004",
-        "Name": "Bohemian Maxi Dress",
-        "Category": "Dress",
-        "Price": 95,
-        "Available_Sizes": "S,M,L,XL",
-        "Fit": "Relaxed",
-        "Color": "Multi",
-        "Pattern": "Paisley",
-        "Sleeve_Length": "Long sleeve",
-        "Neckline": "V-neck",
-        "Length": "Maxi",
-        "Occasion": "Casual",
-        "Season": "Fall",
-        "Brand": "BohoChic",
-        "Description": "Free-spirited maxi dress with paisley print"
-    },
-    {
-        "Product_ID": "T002",
-        "Name": "Pastel Pink Blouse",
-        "Category": "Top",
-        "Price": 40,
-        "Available_Sizes": "XS,S,M,L,XL",
-        "Fit": "Tailored",
-        "Color": "Light Pink",
-        "Pattern": "Solid",
-        "Sleeve_Length": "Short sleeve",
-        "Neckline": "Button-up",
-        "Length": "Hip-length",
-        "Occasion": "Business Casual",
-        "Season": "Spring",
-        "Brand": "ProfessionalPastels",
-        "Description": "Soft pink blouse perfect for office wear"
-    }
-]
-
-# Create DataFrame and save to Excel
-df = pd.DataFrame(catalog_data)
-os.makedirs('data', exist_ok=True)
-df.to_excel('data/Apparels_shared.xlsx', index=False)
-print("Sample catalog created successfully!")
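A short, hypothetical sanity check of the catalog generated above (`to_excel`/`read_excel` assume an Excel engine such as `openpyxl` is installed):

```python
# Hypothetical check of the sample catalog written by create_catalog.py above.
import pandas as pd

catalog = pd.read_excel("data/Apparels_shared.xlsx")
print(len(catalog), "products")                      # 10 products in the sample data above
print(catalog["Category"].value_counts().to_dict())  # e.g. {'Dress': 4, 'Top': 2, ...}
print(catalog[catalog["Price"] <= 60][["Name", "Price"]])
```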
design.png
DELETED
Git LFS Details
env.example
DELETED
@@ -1,10 +0,0 @@
-# OpenAI API Configuration
-openai_api_key='api-key'
-
-# App Configuration
-app_debug=False
-similarity_threshold=0.8
-
-# Data Configuration
-catalog_file=data/Apparels_shared.xlsx
-vibes_data_dir=data/vibes/
image.png
DELETED
Binary file (78.9 kB)
recommendation_system.py
DELETED
@@ -1,349 +0,0 @@
-"""
-Main Vibe-to-Attribute Clothing Recommendation System
-
-Coordinates all modules to provide end-to-end fashion recommendations
-from user queries to natural language suggestions.
-"""
-
-import os
-import sys
-from typing import Dict, List, Optional, Any
-
-# Add modules to path
-sys.path.append(os.path.join(os.path.dirname(__file__), 'modules'))
-
-from modules.nlp_analyzer import NLPAnalyzer
-from modules.similarity_matcher import SimilarityMatcher
-from modules.gpt_inference import GPTInference
-from modules.catalog_filter import CatalogFilter
-from modules.nlg_generator import NLGGenerator
-
-class VibeRecommendationSystem:
-    def __init__(self, config: Optional[Dict[str, Any]] = None):
-        """
-        Initialize the complete recommendation system.
-
-        Args:
-            config: Configuration dictionary with system settings
-        """
-        self.config = config or {}
-
-        # Initialize all modules
-        self.nlp_analyzer = NLPAnalyzer()
-        self.similarity_matcher = SimilarityMatcher(
-            vibes_data_dir=self.config.get('vibes_data_dir', 'data/vibes/'),
-            similarity_threshold=self.config.get('similarity_threshold', 0.8)
-        )
-        self.gpt_inference = GPTInference()
-        self.catalog_filter = CatalogFilter(
-            catalog_file=self.config.get('catalog_file', 'data/Apparels_shared.xlsx')
-        )
-        self.nlg_generator = NLGGenerator()
-
-        print("✓ Vibe Recommendation System initialized successfully!")
-
-    def get_recommendations(self,
-                            user_query: str,
-                            user_preferences: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
-        """
-        Main method to get clothing recommendations from a user query.
-
-        Args:
-            user_query: Natural language request from user
-            user_preferences: Additional user preferences (size, budget, etc.)
-
-        Returns:
-            Dictionary containing recommendations and processing details
-        """
-        print(f"\n🔍 Processing query: '{user_query}'")
-
-        # Step 1: NLP Analysis
-        print("Step 1: Analyzing natural language query...")
-        nlp_result = self.nlp_analyzer.analyze_query(user_query)
-        extracted_attributes = nlp_result['extracted_attributes']
-        key_phrases = nlp_result['key_phrases']
-
-        print(f"✓ Extracted attributes: {extracted_attributes}")
-        print(f"✓ Key phrases: {key_phrases}")
-
-        # Step 2: Similarity Matching
-        print("\nStep 2: Matching against vibe knowledge base...")
-        similarity_result = self.similarity_matcher.find_best_matches(
-            extracted_phrases=key_phrases,
-            individual_attributes=extracted_attributes
-        )
-
-        rule_based_attributes = similarity_result['matched_attributes']
-        has_high_confidence = similarity_result['has_high_confidence_matches']
-
-        print(f"✓ Rule-based matches: {rule_based_attributes}")
-        print(f"✓ High confidence matches: {has_high_confidence}")
-
-        # Step 3: GPT Inference (if needed)
-        gpt_attributes = {}
-        if not has_high_confidence or len(rule_based_attributes) < 3:
-            print("\nStep 3: Using GPT for attribute inference...")
-            gpt_attributes = self.gpt_inference.infer_attributes(
-                user_query=user_query,
-                existing_attributes={**extracted_attributes, **rule_based_attributes},
-                vibe_mappings=self.similarity_matcher.vibe_mappings
-            )
-
-            if gpt_attributes:
-                print(f"✓ GPT inferred attributes: {gpt_attributes}")
-            else:
-                print("⚠ GPT inference not available or failed")
-        else:
-            print("\nStep 3: Skipping GPT inference (high confidence rule-based matches)")
-
-        # Step 4: Merge attributes
-        print("\nStep 4: Merging attributes...")
-        final_attributes = self._merge_all_attributes(
-            extracted_attributes,
-            rule_based_attributes,
-            gpt_attributes,
-            user_preferences or {}
-        )
-
-        print(f"✓ Final attributes: {final_attributes}")
-
-        # Step 5: Check for missing critical attributes
-        missing_attributes = self._check_missing_attributes(final_attributes)
-        if missing_attributes:
-            print(f"⚠ Missing critical attributes: {missing_attributes}")
-
-            # Generate user-friendly questions and use the first one as the main message
-            follow_up_questions = self._generate_follow_up_questions(missing_attributes)
-            user_friendly_message = follow_up_questions[0] if follow_up_questions else "I need more information to help you find the perfect clothing item."
-
-            return {
-                'success': False,
-                'message': user_friendly_message,
-                'missing_attributes': missing_attributes,
-                'suggested_questions': follow_up_questions,
-                'final_attributes': final_attributes,
-                'processing_details': {
-                    'nlp_analysis': nlp_result,
-                    'similarity_matching': similarity_result
-                }
-            }
-
-        # Step 6: Product Filtering
-        print("\nStep 5: Filtering product catalog...")
-        matching_products = self.catalog_filter.filter_products(
-            attributes=final_attributes,
-            max_results=self.config.get('max_results', 5)
-        )
-
-        print(f"✓ Found {len(matching_products)} matching products")
-
-        # Step 7: Natural Language Generation
-        print("\nStep 6: Generating natural language response...")
-        suggestion = self.nlg_generator.generate_suggestion(
-            products=matching_products,
-            original_query=user_query,
-            attributes=final_attributes
-        )
-
-        # Use the suggestion as-is (NLGGenerator already handles tone internally)
-        final_suggestion = suggestion
-
-        print("✓ Generated recommendation response")
-
-        return {
-            'success': True,
-            'recommendation': final_suggestion,
-            'products': matching_products,
-            'final_attributes': final_attributes,
-            'processing_details': {
-                'nlp_analysis': nlp_result,
-                'similarity_matching': similarity_result,
-                'gpt_inference': gpt_attributes,
-                'products_found': len(matching_products)
-            }
-        }
-
-    def _merge_all_attributes(self,
-                              extracted: Dict[str, Any],
-                              rule_based: Dict[str, Any],
-                              gpt_inferred: Dict[str, Any],
-                              user_prefs: Dict[str, Any]) -> Dict[str, Any]:
-        """Merge attributes from all sources with proper priority."""
-
-        # Priority order: user_prefs > rule_based > gpt_inferred > extracted
-        merged = {}
-
-        # Start with extracted (lowest priority)
-        for key, value in extracted.items():
-            if value:
-                merged[key] = value
-
-        # Override with GPT inferred (handle None case)
-        if gpt_inferred:
-            for key, value in gpt_inferred.items():
-                if value:
-                    merged[key] = value
-
-        # Override with rule-based (higher confidence)
-        for key, value in rule_based.items():
-            if value:
-                merged[key] = value
-
-        # Override with user preferences (highest priority)
-        for key, value in user_prefs.items():
-            if value:
-                merged[key] = value
-
-        return merged
-
-    def _check_missing_attributes(self, attributes: Dict[str, Any]) -> List[str]:
-        """Check for missing critical attributes."""
-        critical_attributes = ['category', 'size', 'budget']  # Must have category, size, and budget
-        recommended_attributes = ['occasion', 'season']  # Good to have
-
-        missing = []
-
-        # Check critical attributes
-        for attr in critical_attributes:
-            value = attributes.get(attr)
-            is_missing = attr not in attributes or not value
-            print(f"✓ Checking {attr}: value={repr(value)}, type={type(value)}, is_missing={is_missing}")
-            if is_missing:
-                missing.append(attr)
-
-        # Check if we have at least some context (only if we have category, size, and budget)
-        if 'category' in attributes and 'size' in attributes and 'budget' in attributes:
-            context_attributes = ['occasion', 'season', 'style', 'fit']
-            has_context = any(attr in attributes and attributes[attr] for attr in context_attributes)
-
-            if not has_context:
-                missing.extend(['occasion or style'])
-
-        return missing
-
-    def _generate_follow_up_questions(self, missing_attributes: List[str]) -> List[str]:
-        """Generate follow-up questions for missing attributes with helpful examples."""
-        questions = []
-
-        # Get available options from the system
-        available_categories = ["dress", "top", "pants", "skirt", "jacket", "shirt", "blouse"]
-        available_occasions = ["casual", "formal", "work", "party", "date", "wedding", "brunch", "vacation"]
-        available_styles = ["casual", "formal", "chic", "bohemian", "minimalist", "edgy", "romantic", "professional"]
-        available_fits = ["relaxed", "tailored", "loose", "fitted", "oversized", "slim", "regular"]
-        available_sizes = ["XS", "S", "M", "L", "XL", "XXL"]
-
-        question_templates = {
-            'category': f"What type of clothing are you looking for? Choose from: {', '.join(available_categories)}",
-            'size': f"What size do you need? Available sizes: {', '.join(available_sizes)}",
-            'budget': "What's your budget? You can say something like '$50', 'under $100', or '200 dollars'",
-            'occasion': f"What's the occasion? For example: {', '.join(available_occasions[:6])}",
-            'season': "What season is this for? (spring, summer, fall, winter)",
-            'style': f"What style are you going for? Options include: {', '.join(available_styles[:6])}",
-            'fit': f"How would you like it to fit? Choose from: {', '.join(available_fits[:4])}"
-        }
-
-        for attr in missing_attributes:
-            if attr in question_templates:
-                questions.append(question_templates[attr])
-            elif 'or' in attr:  # Handle compound attributes like "occasion or style"
-                if 'occasion' in attr:
-                    questions.append(f"Tell me about the occasion or style! For example: {', '.join(available_occasions[:4])} or {', '.join(available_styles[:4])}")
-            else:
-                questions.append(f"Could you tell me more about the {attr}? Give me some details!")
-
-        return questions
-
-    def get_system_status(self) -> Dict[str, Any]:
-        """Get status of all system components."""
-        status = {
-            'nlp_analyzer': 'Ready',
-            'similarity_matcher': f"Loaded {len(self.similarity_matcher.all_vibe_keys)} vibe mappings",
-            'gpt_inference': 'Ready' if self.gpt_inference.available else 'Not configured (missing API key)',
-            'catalog_filter': f"Loaded {len(self.catalog_filter.catalog_df)} products" if not self.catalog_filter.catalog_df.empty else 'No catalog loaded',
-            'nlg_generator': 'Ready'
-        }
-
-        return status
-
-    def interactive_session(self):
-        """Run an interactive recommendation session."""
-        print("🌟 Welcome to the Vibe-to-Attribute Clothing Recommendation System!")
-        print("Ask me for clothing recommendations using natural language.")
-        print("Type 'quit' to exit, 'status' to see system status.\n")
-
-        while True:
-            try:
-                user_input = input("👤 What are you looking for? ").strip()
-
-                if user_input.lower() in ['quit', 'exit', 'bye']:
-                    print("👋 Thanks for using the recommendation system! Goodbye!")
-                    break
-
-                if user_input.lower() == 'status':
-                    status = self.get_system_status()
-                    print("\n📊 System Status:")
-                    for component, status_msg in status.items():
-                        print(f"  • {component}: {status_msg}")
-                    print()
-                    continue
-
-                if not user_input:
-                    print("Please enter a clothing request or type 'quit' to exit.\n")
-                    continue
-
-                # Get recommendations
-                result = self.get_recommendations(user_input)
-
-                if result['success']:
-                    print(f"\n🤖 {result['recommendation']}\n")
-                else:
-                    print(f"\n🤖 {result['message']}")
-                    if 'suggested_questions' in result:
-                        print("\nTo help me better, please answer:")
-                        for question in result['suggested_questions']:
-                            print(f"  • {question}")
-                    print()
-
-            except KeyboardInterrupt:
-                print("\n👋 Thanks for using the recommendation system! Goodbye!")
-                break
-            except Exception as e:
-                print(f"❌ An error occurred: {e}")
-                print("Please try again with a different request.\n")
-
-# Example usage and testing
-if __name__ == "__main__":
-    # Initialize system
-    system = VibeRecommendationSystem()
-
-    # Check system status
-    print("System Status:")
-    status = system.get_system_status()
-    for component, status_msg in status.items():
-        print(f"  • {component}: {status_msg}")
-
-    # Test with sample queries
-    test_queries = [
-        "Something casual for a summer brunch",
-        "I need a formal black dress for an evening event",
-        "Comfortable workout clothes for the gym"
-    ]
-
-    print("\n" + "="*50)
-    print("Testing with sample queries:")
-    print("="*50)
-
-    for query in test_queries:
-        print(f"\n🧪 Testing: '{query}'")
-        result = system.get_recommendations(query)
-
-        if result['success']:
-            print(f"✅ Success: {result['recommendation']}")
-        else:
-            print(f"❌ Failed: {result['message']}")
-
-    # Start interactive session if run directly
-    print("\n" + "="*50)
-    print("Starting interactive session...")
-    print("="*50)
-    system.interactive_session()
requirements.txt
DELETED
@@ -1,12 +0,0 @@
streamlit>=1.28.0
spacy>=3.4.0
nltk>=3.8
pandas>=1.5.0
numpy>=1.21.0
scikit-learn>=1.2.0
openai>=1.0.0
python-dotenv>=0.19.0
openpyxl>=3.0.0
sentence-transformers>=2.2.0
setuptools>=65.0.0
en-core-web-md @ https://github.com/explosion/spacy-models/releases/download/en_core_web_md-3.8.0/en_core_web_md-3.8.0-py3-none-any.whl
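
A quick, hedged way to confirm these pins resolved after installation (purely illustrative, not a file from this repository; the only names taken from above are the package pins and the en_core_web_md model wheel):

# Illustrative smoke test for the dependency pins above; not part of the deleted repo.
import importlib

for module in ["streamlit", "spacy", "nltk", "pandas", "numpy", "sklearn",
               "openai", "dotenv", "openpyxl", "sentence_transformers"]:
    importlib.import_module(module)  # raises ImportError if the matching pin failed to install

import spacy
nlp = spacy.load("en_core_web_md")       # the model wheel pinned on the last line
print(nlp("sanity check").vector.shape)  # the medium English model ships 300-d vectors
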
run.py
DELETED
@@ -1,145 +0,0 @@
#!/usr/bin/env python3
"""
Main run script for the Vibe-to-Attribute Clothing Recommendation System

This script:
1. Sets up the environment
2. Creates the sample product catalog
3. Downloads required models
4. Starts the Streamlit application
"""

import os
import sys
import subprocess
import platform
from pathlib import Path

def print_banner():
    """Print welcome banner."""
    banner = """
╔══════════════════════════════════════════════════════════════════════════════╗
║                                                                                ║
║        🌟 Vibe-to-Attribute Clothing Recommendation System 🌟                  ║
║                                                                                ║
║   Transform your style ideas into perfect outfit recommendations using AI!    ║
║                                                                                ║
╚══════════════════════════════════════════════════════════════════════════════╝
    """
    print(banner)

def check_python_version():
    """Check if Python version is compatible."""
    min_version = (3, 8)
    current_version = sys.version_info[:2]

    if current_version < min_version:
        print(f"❌ Python {min_version[0]}.{min_version[1]}+ is required. Current version: {current_version[0]}.{current_version[1]}")
        return False

    print(f"✅ Python version: {current_version[0]}.{current_version[1]}")
    return True

def install_requirements():
    """Install required Python packages."""
    print("\n📦 Installing required packages...")

    try:
        subprocess.check_call([
            sys.executable, "-m", "pip", "install", "-r", "requirements.txt"
        ])
        print("✅ Requirements installed successfully!")
        return True
    except subprocess.CalledProcessError as e:
        print(f"❌ Failed to install requirements: {e}")
        return False

def create_sample_catalog():
    """Create the sample product catalog."""
    print("\n📊 Creating sample product catalog...")

    try:
        # Run the catalog creation script
        exec(open('create_catalog.py').read())
        print("✅ Sample catalog created successfully!")
        return True
    except Exception as e:
        print(f"❌ Failed to create sample catalog: {e}")
        return False

def check_environment():
    """Check if environment file exists and provide guidance."""
    print("\n🔧 Checking environment configuration...")

    env_file = Path('.env')
    env_example = Path('env.example')

    if not env_file.exists():
        if env_example.exists():
            print("⚠️  No .env file found. Please:")
            print("   1. Copy env.example to .env")
            print("   2. Edit .env with your API keys")
            print("   3. Set your OpenAI API key for GPT features")
            print("\n   Example:")
            print("   cp env.example .env")
            print("   # Then edit .env with your actual API keys")
        else:
            print("⚠️  No environment configuration found.")

        print("\n💡 The system will work with limited functionality without API keys.")
        print("   GPT inference will be disabled, but rule-based matching will work.")
        return False
    else:
        print("✅ Environment file found!")
        return True

def start_streamlit():
    """Start the Streamlit application."""
    print("\n🚀 Starting Streamlit application...")
    print("📍 The app will open in your default web browser")
    print("\n⏹️  Press Ctrl+C to stop the application")

    try:
        subprocess.run([
            sys.executable, "-m", "streamlit", "run", "streamlit_app.py"
        ])
    except KeyboardInterrupt:
        print("\n👋 Application stopped by user")
    except Exception as e:
        print(f"❌ Failed to start Streamlit: {e}")

def main():
    """Main execution function."""
    print_banner()

    print("🔍 System Check...")

    # Check Python version
    if not check_python_version():
        sys.exit(1)

    # Check if we're in the right directory
    if not Path("requirements.txt").exists():
        print("❌ Please run this script from the project root directory")
        sys.exit(1)

    print("✅ In correct directory")

    # Install requirements
    if not install_requirements():
        print("⚠️  Continuing with existing packages...")

    # Create sample catalog
    create_sample_catalog()

    # Check environment
    check_environment()

    print("\n" + "="*80)
    print("🎉 Setup complete! Starting the application...")
    print("="*80)

    # Start Streamlit
    start_streamlit()

if __name__ == "__main__":
    main()
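
A design note on create_sample_catalog() above: it runs create_catalog.py through exec(), which executes that script inside run.py's own namespace. A hedged alternative sketch (not what the deleted script does) is to launch it as a child process, mirroring the subprocess pattern already used in install_requirements():

# Hypothetical variant of create_sample_catalog(); the original uses exec() instead.
import subprocess
import sys

def create_sample_catalog_subprocess() -> bool:
    """Run create_catalog.py in an isolated child process."""
    try:
        subprocess.check_call([sys.executable, "create_catalog.py"])
        print("✅ Sample catalog created successfully!")
        return True
    except subprocess.CalledProcessError as e:
        print(f"❌ Failed to create sample catalog: {e}")
        return False
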
streamlit_app.py
DELETED
@@ -1,342 +0,0 @@
"""
Streamlit Web Application for Vibe-to-Attribute Clothing Recommendation System
"""

import streamlit as st
import sys
import os
from typing import Dict, Any, List

try:
    from recommendation_system import VibeRecommendationSystem
except ImportError as e:
    st.error(f"Failed to import recommendation system: {e}")
    st.stop()

# Configure Streamlit page
st.set_page_config(
    page_title="Vibe Fashion Recommender",
    page_icon="👗",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Custom CSS for better styling
st.markdown("""
<style>
    .main-header {
        font-size: 3rem;
        font-weight: bold;
        text-align: center;
        background: linear-gradient(45deg, #FF6B6B, #4ECDC4);
        -webkit-background-clip: text;
        -webkit-text-fill-color: transparent;
        margin-bottom: 2rem;
    }
    .subtitle {
        text-align: center;
        font-size: 1.2rem;
        color: #666;
        margin-bottom: 3rem;
    }
    .recommendation-box {
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        color: white;
        padding: 2rem;
        border-radius: 15px;
        margin: 2rem 0;
    }
</style>
""", unsafe_allow_html=True)

# Initialize session state for conversation
if 'recommendation_system' not in st.session_state:
    with st.spinner("🔄 Initializing Fashion Recommendation System..."):
        try:
            st.session_state.recommendation_system = VibeRecommendationSystem()
            st.session_state.system_initialized = True
        except Exception as e:
            st.session_state.system_initialized = False
            st.error(f"Failed to initialize system: {e}")

# Initialize conversation state
if 'conversation_history' not in st.session_state:
    st.session_state.conversation_history = []
if 'pending_attributes' not in st.session_state:
    st.session_state.pending_attributes = {}
if 'missing_attributes' not in st.session_state:
    st.session_state.missing_attributes = []
if 'conversation_active' not in st.session_state:
    st.session_state.conversation_active = False

def main():
    """Main Streamlit application."""

    # Header
    st.markdown('<h1 class="main-header">👗 Vibe Fashion Recommender</h1>', unsafe_allow_html=True)
    st.markdown('<p class="subtitle">Transform your style ideas into perfect outfit recommendations using AI</p>', unsafe_allow_html=True)

    # Check if system is initialized
    if not st.session_state.get('system_initialized', False):
        st.error("❌ System failed to initialize. Please refresh the page.")
        return

    # Sidebar - System Status Only
    with st.sidebar:
        st.header("📊 System Status")
        if st.button("Check Status"):
            status = st.session_state.recommendation_system.get_system_status()
            for component, status_msg in status.items():
                if "Ready" in status_msg or "Loaded" in status_msg:
                    st.success(f"✅ {component}: {status_msg}")
                else:
                    st.warning(f"⚠️ {component}: {status_msg}")

        st.markdown("---")
        st.markdown("**💡 Tip:** Tell me your size and budget in the conversation!")
        st.markdown("*Example: 'I need a size M dress under $100 for a party'*")

    # Chat interface for conversation
    st.header("💬 Fashion Chat")

    # Display conversation history
    if st.session_state.conversation_history:
        st.markdown("**Conversation History:**")
        chat_container = st.container()
        with chat_container:
            for i, exchange in enumerate(st.session_state.conversation_history):
                # User message
                st.markdown(f"**You:** {exchange['user']}")
                # Assistant response
                if exchange.get('assistant'):
                    st.markdown(f"**Assistant:** {exchange['assistant']}")
                if i < len(st.session_state.conversation_history) - 1:
                    st.markdown("---")

    # Current pending context
    if st.session_state.pending_attributes:
        st.info(f"💭 I remember: {', '.join([f'{k}: {v}' for k, v in st.session_state.pending_attributes.items()])}")

    # User input with form to handle clearing better
    with st.form(key="user_input_form", clear_on_submit=True):
        user_input = st.text_input(
            "Continue the conversation:" if st.session_state.conversation_active else "What are you looking for?",
            placeholder="Tell me more details..." if st.session_state.missing_attributes else "e.g., I want something elegant for a dinner date...",
            key="user_input_field"
        )
        send_button = st.form_submit_button("💬 Send", type="primary")

    # Buttons
    col1, col2 = st.columns([1, 1])

    with col1:
        if st.button("🔄 New Request"):
            # Reset conversation
            st.session_state.conversation_history = []
            st.session_state.pending_attributes = {}
            st.session_state.missing_attributes = []
            st.session_state.conversation_active = False
            st.rerun()

    with col2:
        if st.button("✨ Get Final Recommendations"):
            if st.session_state.pending_attributes:
                get_final_recommendations(st.session_state.pending_attributes, {})
            else:
                st.warning("Please start a conversation first!")

    # Process user input (now triggered by form submit)
    if send_button and user_input.strip():
        process_user_input(user_input, {})

def process_user_input(user_input: str, user_prefs: Dict[str, Any]):
    """Process user input in conversational context."""

    # Combine current input with pending attributes to form complete query
    if st.session_state.pending_attributes:
        # Build context from previous conversation
        context_parts = []
        for key, value in st.session_state.pending_attributes.items():
            if isinstance(value, list):
                context_parts.append(f"{key}: {', '.join(value)}")
            else:
                context_parts.append(f"{key}: {value}")

        # Combine context with new input
        combined_query = f"Previous context: {'; '.join(context_parts)}. New information: {user_input}"
    else:
        combined_query = user_input

    with st.spinner("🤖 Processing your message..."):
        try:
            # Get recommendations with combined context and pending attributes
            # Pass pending attributes as user preferences (highest priority)
            merged_prefs = user_prefs.copy()
            merged_prefs.update(st.session_state.pending_attributes)

            result = st.session_state.recommendation_system.get_recommendations(
                user_query=combined_query,
                user_preferences=merged_prefs
            )

            # Add to conversation history
            exchange = {"user": user_input}

            if result['success']:
                # Got successful recommendations
                exchange["assistant"] = result['recommendation']
                st.session_state.conversation_history.append(exchange)
                st.session_state.conversation_active = False
                st.session_state.pending_attributes = {}
                st.session_state.missing_attributes = []

                # Display recommendations
                display_recommendations(result)

            else:
                # Need more information - maintain conversation state
                exchange["assistant"] = result['message']
                st.session_state.conversation_history.append(exchange)
                st.session_state.conversation_active = True
                st.session_state.missing_attributes = result.get('missing_attributes', [])

                # Update pending attributes with what we know so far
                if 'final_attributes' in result:
                    st.session_state.pending_attributes.update(result['final_attributes'])

                # Extract new attributes from current input only (not combined query)
                try:
                    # Analyze just the current user input to extract new attributes
                    nlp_result = st.session_state.recommendation_system.nlp_analyzer.analyze_query(user_input)
                    if nlp_result and 'extracted_attributes' in nlp_result:
                        extracted = nlp_result['extracted_attributes']
                        for key, value in extracted.items():
                            if value and value not in [None, "", []]:
                                st.session_state.pending_attributes[key] = value
                                print(f"✓ Updated pending attributes from current input: {key} = {value}")
                                # Show debug info for size and budget specifically
                                if key in ['size', 'budget']:
                                    print(f"   {key.title()} type: {type(value)}, value: {repr(value)}")
                except Exception as e:
                    print(f"Error extracting attributes from current input: {e}")
                    pass

                # Show the assistant's response
                st.info(f"💬 **Assistant:** {exchange['assistant']}")

                # Show follow-up questions
                if 'suggested_questions' in result:
                    st.markdown("**To help me better, please answer:**")
                    for question in result['suggested_questions']:
                        st.write(f"• {question}")

                st.rerun()

        except Exception as e:
            st.error(f"❌ An error occurred: {str(e)}")

def get_final_recommendations(pending_attributes: Dict[str, Any], user_prefs: Dict[str, Any]):
    """Get final recommendations with accumulated attributes."""

    # Build a query from accumulated attributes
    query_parts = []
    for key, value in pending_attributes.items():
        if isinstance(value, list):
            query_parts.append(f"{key}: {', '.join(value)}")
        else:
            query_parts.append(f"{key}: {value}")

    combined_query = f"Find clothing with: {'; '.join(query_parts)}"

    with st.spinner("🤖 Getting your final recommendations..."):
        try:
            result = st.session_state.recommendation_system.get_recommendations(
                user_query=combined_query,
                user_preferences=user_prefs
            )

            if result['success']:
                # Reset conversation state
                st.session_state.conversation_active = False
                st.session_state.pending_attributes = {}
                st.session_state.missing_attributes = []

                # Display recommendations
                display_recommendations(result)
            else:
                st.error(f"Still missing information: {result.get('message', 'Unknown error')}")

        except Exception as e:
            st.error(f"❌ An error occurred: {str(e)}")

def get_recommendations(user_query: str, user_prefs: Dict[str, Any]):
    """Get and display recommendations (legacy function for compatibility)."""

    with st.spinner("🤖 Analyzing your request and finding perfect matches..."):
        try:
            result = st.session_state.recommendation_system.get_recommendations(
                user_query=user_query,
                user_preferences=user_prefs
            )

            display_recommendations(result)

        except Exception as e:
            st.error(f"❌ An error occurred: {str(e)}")

def display_recommendations(result: Dict[str, Any]):
    """Display the recommendation results."""

    if result['success']:
        # Main recommendation
        st.markdown('<div class="recommendation-box">', unsafe_allow_html=True)
        st.markdown("### 🎉 Your Perfect Match!")
        st.markdown(result['recommendation'])
        st.markdown('</div>', unsafe_allow_html=True)

        # Product details
        if result['products']:
            st.header("👕 Product Details")

            for i, product in enumerate(result['products']):
                with st.expander(f"🛍️ {product['name']} - ${product['price']}", expanded=(i == 0)):
                    col1, col2 = st.columns(2)

                    with col1:
                        st.write(f"**Category:** {product['category']}")
                        st.write(f"**Price:** ${product['price']}")
                        if product.get('fit'):
                            st.write(f"**Fit:** {product['fit']}")
                        if product.get('fabric'):
                            st.write(f"**Fabric:** {product['fabric']}")

                    with col2:
                        if product.get('color_or_print'):
                            st.write(f"**Color/Print:** {product['color_or_print']}")
                        st.write(f"**Available Sizes:** {product['available_sizes']}")
                        if product.get('sleeve_length'):
                            st.write(f"**Sleeve Length:** {product['sleeve_length']}")
                        if product.get('neckline'):
                            st.write(f"**Neckline:** {product['neckline']}")
                        if product.get('length'):
                            st.write(f"**Length:** {product['length']}")
                        if product.get('pant_type'):
                            st.write(f"**Pant Type:** {product['pant_type']}")
                        if product.get('occasion'):
                            st.write(f"**Occasion:** {product['occasion']}")

                    if product.get('description'):
                        st.write(f"**Description:** {product['description']}")

    else:
        st.error(result['message'])
        if 'suggested_questions' in result:
            st.markdown("**Please help me by answering:**")
            for question in result['suggested_questions']:
                st.write(f"• {question}")

if __name__ == "__main__":
    main()
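
For concreteness, a minimal sketch of the query string that process_user_input() above assembles once pending_attributes already holds earlier answers; the attribute values are invented for illustration:

# Illustrative only: mirrors the combined_query format built in process_user_input().
pending_attributes = {"occasion": "dinner date", "size": "M"}  # hypothetical earlier turns
user_input = "something black under $100"                      # hypothetical new turn

context_parts = [f"{k}: {v}" for k, v in pending_attributes.items()]
combined_query = f"Previous context: {'; '.join(context_parts)}. New information: {user_input}"
print(combined_query)
# Previous context: occasion: dinner date; size: M. New information: something black under $100
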
test_system.py
DELETED
@@ -1,165 +0,0 @@
#!/usr/bin/env python3
"""
Quick test script to verify the Vibe-to-Attribute Clothing Recommendation System
is working correctly without requiring all dependencies.
"""

import os
import sys
from pathlib import Path

def test_file_structure():
    """Test that all required files exist."""
    print("🔍 Testing file structure...")

    required_files = [
        'requirements.txt',
        'env.example',
        'streamlit_app.py',
        'create_catalog.py',
        'data/Apparels_shared.xlsx',
        'data/vibes/fit_mapping.json',
        'data/vibes/color_mapping.json',
        'data/vibes/occasion_mapping.json',
        'src/recommendation_system.py',
        'modules/nlp_analyzer.py',
        'modules/similarity_matcher.py',
        'modules/gpt_inference.py',
        'modules/catalog_filter.py',
        'modules/nlg_generator.py'
    ]

    missing_files = []
    for file_path in required_files:
        if not Path(file_path).exists():
            missing_files.append(file_path)
        else:
            print(f"   ✅ {file_path}")

    if missing_files:
        print(f"\n❌ Missing files:")
        for file_path in missing_files:
            print(f"   - {file_path}")
        return False

    print("✅ All required files present!")
    return True

def test_basic_imports():
    """Test that basic imports work."""
    print("\n🔍 Testing basic imports...")

    try:
        # Test data loading
        import json

        # Test vibe mappings
        with open('data/vibes/fit_mapping.json', 'r') as f:
            fit_data = json.load(f)
        print(f"   ✅ Fit mappings loaded: {len(fit_data)} entries")

        with open('data/vibes/color_mapping.json', 'r') as f:
            color_data = json.load(f)
        print(f"   ✅ Color mappings loaded: {len(color_data)} entries")

        with open('data/vibes/occasion_mapping.json', 'r') as f:
            occasion_data = json.load(f)
        print(f"   ✅ Occasion mappings loaded: {len(occasion_data)} entries")

        return True

    except Exception as e:
        print(f"❌ Import test failed: {e}")
        return False

def test_catalog_loading():
    """Test that the catalog can be loaded."""
    print("\n🔍 Testing catalog loading...")

    try:
        import pandas as pd

        catalog_df = pd.read_excel('data/Apparels_shared.xlsx')
        print(f"   ✅ Catalog loaded: {len(catalog_df)} products")

        # Check required columns
        required_columns = ['Name', 'Category', 'Price', 'Available_Sizes']
        missing_columns = [col for col in required_columns if col not in catalog_df.columns]

        if missing_columns:
            print(f"   ⚠️ Missing columns: {missing_columns}")
        else:
            print("   ✅ All required columns present")

        return True

    except Exception as e:
        print(f"❌ Catalog test failed: {e}")
        return False

def test_system_initialization():
    """Test basic system initialization."""
    print("\n🔍 Testing system initialization...")

    try:
        # Add paths
        sys.path.append(os.path.join(os.path.dirname(__file__), 'src'))
        sys.path.append(os.path.join(os.path.dirname(__file__), 'modules'))

        # Test basic module imports (without dependencies that might fail)
        from catalog_filter import CatalogFilter

        # Test catalog filter
        catalog_filter = CatalogFilter()
        print("   ✅ Catalog filter initialized")

        # Test basic filtering
        summary = catalog_filter.get_catalog_summary()
        print(f"   ✅ Catalog summary: {summary.get('total_products', 0)} products")

        return True

    except Exception as e:
        print(f"❌ System initialization test failed: {e}")
        print(f"   This might be due to missing dependencies - install with: pip install -r requirements.txt")
        return False

def main():
    """Run all tests."""
    print("🌟 Vibe-to-Attribute Clothing Recommendation System - Test Suite")
    print("=" * 70)

    tests = [
        test_file_structure,
        test_basic_imports,
        test_catalog_loading,
        test_system_initialization
    ]

    passed = 0
    total = len(tests)

    for test_func in tests:
        try:
            if test_func():
                passed += 1
        except Exception as e:
            print(f"❌ Test {test_func.__name__} failed with exception: {e}")

    print("\n" + "=" * 70)
    print(f"🏁 Test Results: {passed}/{total} tests passed")

    if passed == total:
        print("🎉 All tests passed! System is ready to run.")
        print("\n🚀 To start the application, run:")
        print("   python3 run.py")
    else:
        print("⚠️ Some tests failed. Please check the issues above.")
        print("\n🔧 Try installing dependencies:")
        print("   pip install -r requirements.txt")

    return passed == total

if __name__ == "__main__":
    success = main()
    sys.exit(0 if success else 1)
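
The checks above print results and return booleans rather than asserting. If the project were ever moved to pytest (which is not in requirements.txt, so this is purely a sketch), one of them could be expressed as:

# Hypothetical pytest-style rewrite of test_file_structure(); pytest is not a declared dependency.
from pathlib import Path

def test_required_files_exist():
    required = ['requirements.txt', 'env.example', 'streamlit_app.py', 'create_catalog.py']
    missing = [p for p in required if not Path(p).exists()]
    assert not missing, f"Missing files: {missing}"
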
vercel.json
DELETED
@@ -1,10 +0,0 @@
{
  "version": 2,
  "builds": [
    { "src": "Dockerfile", "use": "@vercel/docker" }
  ],
  "routes": [
    { "src": "/(.*)", "dest": "/" }
  ]
}