"""
Build script optimized for Hugging Face Spaces deployment
Maintains the exact same SOTA RAG architecture
"""
import os
import sys
import logging
import pickle
import json
import numpy as np
import torch
from pathlib import Path
# Make the current directory (which contains app.py) importable
sys.path.append('.')
from app import (
    load_opc_datasets,
    build_retrieval_system,
    ARTIFACT_DIR,
    FAISS_AVAILABLE,
    MODEL_NAME,
    EMBED_MODEL,
    MAX_CORPUS_SIZE
)
# Configure logging; only write /data/build.log when the Space has persistent storage mounted
log_handlers = [logging.StreamHandler(sys.stdout)]
if os.path.isdir('/data'):
    log_handlers.append(logging.FileHandler('/data/build.log'))
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=log_handlers
)
logger = logging.getLogger(__name__)
def check_artifacts():
    """Check if artifacts already exist"""
    required_files = [
        "corpus_data.json",
        "corpus_embeddings.npy",
        "answer_embeddings.npy",
        "bm25.pkl"
    ]
    if FAISS_AVAILABLE:
        required_files.append("faiss_index.bin")
    all_exist = all(os.path.exists(os.path.join(ARTIFACT_DIR, f)) for f in required_files)
    return all_exist
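# These files are expected to be written into ARTIFACT_DIR by build_retrieval_system() in app.py;
# deleting them forces a full rebuild on the next run (see the message logged in main()).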
def build_retrieval_with_progress():
    """Build retrieval system with progress tracking"""
    logger.info("Building SOTA RAG Retrieval System for Coding Assistant")
    logger.info("Architecture: HyDE + Query Rewriting + Multi-Query + Answer-Space Retrieval")
    logger.info(f"Embedding Model: {EMBED_MODEL}")
    logger.info(f"Max Corpus Size: {MAX_CORPUS_SIZE}")
    # Load datasets
    logger.info("Loading coding datasets...")
    ds_map = load_opc_datasets()
    # Build retrieval system (using the exact same function from app.py)
    logger.info("Building retrieval system...")
    retrieval_system = build_retrieval_system(ds_map)
    logger.info("Retrieval system built successfully!")
    logger.info(f" - Corpus size: {len(retrieval_system.corpus_texts)}")
    logger.info(f" - Embedding dimension: {retrieval_system.corpus_embeddings.shape[1]}")
    logger.info(f" - FAISS index: {'Yes' if retrieval_system.faiss_index else 'No'}")
    return retrieval_system
def prepare_llm_artifacts():
    """Prepare LLM artifacts without downloading the full model"""
    logger.info("🤖 Preparing LLM configuration...")
    from transformers import AutoTokenizer, GenerationConfig
    llm_path = os.path.join(ARTIFACT_DIR, "llm_model")
    os.makedirs(llm_path, exist_ok=True)
    # Download and save tokenizer
    logger.info(f"📥 Downloading tokenizer for {MODEL_NAME}...")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    # Use the exact same chat template from app.py
    tokenizer.chat_template = (
        "{% for message in messages %}"
        "{{'<|'+message['role']+'|>\\n'+message['content']+'</s>\\n'}}"
        "{% endfor %}"
        "{% if add_generation_prompt %}"
        "<|assistant|>\n"
        "{% endif %}"
    )
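    # The template above renders each turn as "<|role|>\ncontent</s>\n" and, when
    # add_generation_prompt is set, appends "<|assistant|>\n" so generation continues
    # as the assistant.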
    # Use the exact same generation config from app.py
    generation_config = GenerationConfig(
        max_new_tokens=300,
        temperature=0.7,
        top_p=0.9,
        do_sample=True,
        repetition_penalty=1.15,
        pad_token_id=tokenizer.pad_token_id
    )
    # Save tokenizer and config
    tokenizer.save_pretrained(llm_path)
    generation_config.save_pretrained(llm_path)
    # Create minimal config file
    config = {
        "_name_or_path": MODEL_NAME,
        "architectures": ["LlamaForCausalLM"],
        "model_type": "llama",
        "torch_dtype": "float16",
        "quantization_config": {
            "load_in_4bit": True,
            "bnb_4bit_compute_dtype": "float32",
            "bnb_4bit_use_double_quant": True,
            "bnb_4bit_quant_type": "nf4"
        } if torch.cuda.is_available() else {}
    }
    config_path = os.path.join(llm_path, "config.json")
    with open(config_path, "w") as f:
        json.dump(config, f, indent=2)
    logger.info(f"LLM configuration saved to {llm_path}")
    logger.info("Note: Full model will be downloaded at runtime with 4-bit quantization")
def verify_artifacts():
    """Verify all artifacts are properly built"""
    logger.info("Verifying artifacts...")
    files_to_check = {
        "corpus_data.json": "Corpus data",
        "corpus_embeddings.npy": "Question embeddings",
        "answer_embeddings.npy": "Answer embeddings",
        "bm25.pkl": "BM25 index",
        "faiss_index.bin": "FAISS index"
    }
    for file, description in files_to_check.items():
        path = os.path.join(ARTIFACT_DIR, file)
        if os.path.exists(path):
            size_mb = os.path.getsize(path) / (1024 * 1024)
            logger.info(f" ✅ {description}: {size_mb:.2f} MB")
        else:
            if file != "faiss_index.bin" or FAISS_AVAILABLE:
                logger.warning(f" ❌ Missing: {description}")
def main():
    """Main build process"""
    logger.info("=" * 60)
    logger.info("🤖 Codey Bryant 3.0 - SOTA RAG Build Script")
    logger.info("=" * 60)
    # Create artifacts directory
    os.makedirs(ARTIFACT_DIR, exist_ok=True)
    # Check if we need to rebuild
    if check_artifacts():
        logger.info("Artifacts already exist. Skipping build.")
        logger.info("Delete artifacts to force rebuild.")
    else:
        logger.info("Building fresh artifacts...")
        # Build retrieval system
        build_retrieval_with_progress()
        # Prepare LLM artifacts
        prepare_llm_artifacts()
        logger.info("Build complete!")
    # Verify artifacts
    verify_artifacts()
    # Show total size
    logger.info("\nArtifact Summary:")
    total_size = 0
    for root, dirs, files in os.walk(ARTIFACT_DIR):
        for file in files:
            filepath = os.path.join(root, file)
            size_mb = os.path.getsize(filepath) / (1024 * 1024)
            total_size += size_mb
    logger.info(f" Total size: {total_size:.2f} MB")
    logger.info("=" * 60)
    logger.info("Ready to launch Codey Bryant!")
    logger.info(" Run: python app.py")
    logger.info("=" * 60)
if __name__ == "__main__":
main() |