import os
import json
import requests
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, Model
import sentencepiece as spm
from tqdm import tqdm

# Paths and hyperparameters
TOKENIZER_PATH = "bpe.model"
DATA_PATH = "dataset_shuffled.jsonl"
MODEL_PATH = "encoder_fit.weights.h5"
MAX_LEN = 384
EMBED_DIM = 512
LATENT_DIM = 512
BATCH_SIZE = 768
EPOCHS = 1
SHUFFLE_BUFFER = 200000
LEARNING_RATE = 5e-4
TEMPERATURE = 0.05  # softmax temperature for the contrastive loss
SEED = 42

np.random.seed(SEED)
tf.random.set_seed(SEED)
tf.get_logger().setLevel("ERROR")

# Initialize a TPU if one is available; otherwise fall back to the default
# (GPU/CPU) strategy.
try:
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="local")
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)
    ON_TPU = True
    print("TPU initialization complete")
except Exception as e:
    strategy = tf.distribute.get_strategy()
    ON_TPU = False
    print("TPU unavailable, proceeding on GPU/CPU:", e)

# Use bfloat16 mixed precision on TPU, plain float32 elsewhere.
from tensorflow.keras import mixed_precision
policy = mixed_precision.Policy("mixed_bfloat16" if ON_TPU else "float32")
mixed_precision.set_global_policy(policy)
print("Mixed precision policy:", policy)

# Load the SentencePiece tokenizer and resolve the padding id.
sp = spm.SentencePieceProcessor()
sp.load(TOKENIZER_PATH)
pad_id = sp.piece_to_id("<pad>")
if pad_id == -1:  # fall back to id 0 if "<pad>" is not in the vocabulary
    pad_id = 0
vocab_size = sp.get_piece_size()
print("vocab_size:", vocab_size, "pad_id:", pad_id)

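# A minimal sketch of how a compatible tokenizer could have been produced
# (assumption: bpe.model is a SentencePiece BPE model; the corpus path and
# vocab size below are placeholders, not values taken from this script):
#
#   spm.SentencePieceTrainer.train(
#       input="corpus.txt",            # hypothetical training corpus
#       model_prefix="bpe",            # produces bpe.model / bpe.vocab
#       vocab_size=32000,              # placeholder
#       model_type="bpe",
#       pad_id=3, pad_piece="<pad>",   # ensures piece_to_id("<pad>") resolves
#   )
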
def encode_sentence_np(s: str, max_len=MAX_LEN):
    """Tokenize a sentence and pad/truncate it to exactly max_len ids."""
    ids = sp.encode(s, out_type=int)[:max_len]
    if len(ids) < max_len:
        ids = ids + [pad_id] * (max_len - len(ids))
    return np.array(ids, dtype=np.int32)

class DynamicConv(layers.Layer):
    """Dynamic convolution: a per-position k-tap kernel is predicted from the
    input and applied as a weighted sum over a local window."""

    def __init__(self, d_model, k=7):
        super().__init__()
        assert k % 2 == 1, "kernel size must be odd so output length matches input length"
        self.k = k
        self.dense = layers.Dense(d_model, activation="silu")
        self.proj = layers.Dense(d_model)
        # Kernel generator kept in float32 so its softmax stays numerically stable.
        self.generator = layers.Dense(k, dtype="float32")

    def call(self, x):
        x_in = x
        x = tf.cast(x, tf.float32)
        B = tf.shape(x)[0]
        L = tf.shape(x)[1]
        D = tf.shape(x)[2]
        # Predict a normalized kernel of length k for every position.
        kernels = self.generator(self.dense(x))
        kernels = tf.nn.softmax(kernels, axis=-1)
        # Extract k-wide windows around each position ("same"-style padding).
        pad = (self.k - 1) // 2
        x_pad = tf.pad(x, [[0, 0], [pad, pad], [0, 0]])
        x_pad_4d = tf.expand_dims(x_pad, axis=1)
        patches = tf.image.extract_patches(
            images=x_pad_4d,
            sizes=[1, 1, self.k, 1],
            strides=[1, 1, 1, 1],
            rates=[1, 1, 1, 1],
            padding="VALID",
        )
        patches = tf.reshape(patches, [B, L, self.k, D])
        # Weighted sum over each window using the position-specific kernels.
        out = tf.reduce_sum(patches * tf.expand_dims(kernels, -1), axis=2)
        out = self.proj(out)
        return tf.cast(out, x_in.dtype)

class EncoderBlock(layers.Layer):
    """Stack of dynamic convolutions followed by a gated (SwiGLU-style) feed-forward sublayer."""

    def __init__(self, embed_dim=EMBED_DIM, ff_dim=1152, num_conv_layers=2):
        super().__init__()
        self.fc1 = layers.Dense(ff_dim)
        self.fc2 = layers.Dense(embed_dim)
        self.blocks = [DynamicConv(d_model=embed_dim, k=7) for _ in range(num_conv_layers)]
        self.ln = layers.LayerNormalization(epsilon=1e-5)
        self.ln1 = layers.LayerNormalization(epsilon=1e-5)
        self.ln2 = layers.LayerNormalization(epsilon=1e-5)

    def call(self, x, training=None):
        # Convolution sublayer with a pre-norm residual connection.
        x_norm = self.ln(x)
        out = x_norm
        for block in self.blocks:
            out = block(out)
        x = x_norm + self.ln1(out)
        # Gated feed-forward: split the projection into gate and value halves.
        h = self.fc1(out)
        g, v = tf.split(h, 2, axis=-1)
        h = tf.nn.silu(g) * v
        h = self.fc2(h)
        x = x + self.ln2(h)
        return x

class L2NormLayer(layers.Layer):
    def __init__(self, axis=1, epsilon=1e-10):
        super().__init__()
        self.axis = axis
        self.epsilon = epsilon

    def call(self, inputs):
        return tf.math.l2_normalize(inputs, axis=self.axis, epsilon=self.epsilon)

class SentenceEncoder(Model):
    """Token + position embeddings -> encoder blocks -> masked attention pooling
    -> L2-normalized latent vector."""

    def __init__(self, vocab_size, embed_dim=EMBED_DIM, latent_dim=LATENT_DIM,
                 max_len=MAX_LEN, pad_id=pad_id, dropout_rate=0.1):
        super().__init__()
        self.pad_id = pad_id
        self.embed = layers.Embedding(vocab_size, embed_dim)
        self.pos_embed = layers.Embedding(input_dim=max_len, output_dim=embed_dim)
        self.dropout = layers.Dropout(dropout_rate)
        self.blocks = [EncoderBlock() for _ in range(2)]
        self.attn_pool = layers.Dense(1)
        self.ln_f = layers.LayerNormalization(epsilon=1e-5, dtype=tf.float32)
        self.latent = layers.Dense(latent_dim)
        self.l2norm = L2NormLayer(axis=1)

    def call(self, x, training=None):
        positions = tf.range(tf.shape(x)[1])[tf.newaxis, :]
        x_embed = self.embed(x) + self.pos_embed(positions)
        x_embed = self.dropout(x_embed, training=training)
        mask = tf.cast(tf.not_equal(x, self.pad_id), tf.float32)
        h = x_embed
        for block in self.blocks:
            h = block(h, training=training)
        h = self.ln_f(h)
        # Attention pooling: padding positions are masked out before the softmax.
        scores = self.attn_pool(h)
        scores = tf.cast(scores, tf.float32)
        scores = tf.where(mask[..., tf.newaxis] == 0, tf.constant(-1e9, tf.float32), scores)
        scores = tf.nn.softmax(scores, axis=1)
        pooled = tf.reduce_sum(h * scores, axis=1)
        latent = self.latent(pooled)
        latent = self.l2norm(latent)
        return tf.cast(latent, tf.float32)

# Build the encoder (and later the optimizer) under the distribution strategy scope.
with strategy.scope():
    encoder = SentenceEncoder(vocab_size=vocab_size)

    # Build the variables with a dummy forward pass before loading weights.
    encoder(np.zeros((1, MAX_LEN), dtype=np.int32))

    if os.path.exists(MODEL_PATH):
        try:
            encoder.load_weights(MODEL_PATH)
            print("Loaded weights from", MODEL_PATH)
        except Exception as e:
            print("Warning: load_weights failed:", e)

    # Freeze the backbone; only the pooling/projection head is fine-tuned.
    encoder.trainable = False

    head_layers = []
    for name in ("attn_pool", "ln_f", "latent"):
        layer = getattr(encoder, name, None)
        if layer is None:
            print(f"Warning: encoder has no attribute '{name}'")
        else:
            layer.trainable = True
            head_layers.append(layer)

    # Another forward pass so trainability changes are reflected in the built weights.
    encoder(np.zeros((1, MAX_LEN), dtype=np.int32))

    trainable_vars = []
    for layer in head_layers:
        for v in layer.trainable_weights:
            trainable_vars.append(v)

    if len(trainable_vars) == 0:
        print("ERROR: no head trainable vars found. Dumping all variables:")
        for v in encoder.variables:
            print(v.name, "shape", v.shape, "trainable:", v.trainable)
        raise RuntimeError("No trainable head variables found - aborting.")

    total_trainable = sum(int(np.prod(v.shape)) for v in trainable_vars)
    print("Collected head layers:", [l.name for l in head_layers])
    print("Trainable var count (head):", len(trainable_vars), "params:", total_trainable)

    optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)

AUTOTUNE = tf.data.AUTOTUNE

def _py_encode_line(line):
    """Decode one JSONL line into (query, document, hard_negative) token-id arrays."""
    raw = line.numpy()
    if isinstance(raw, bytes):
        s = raw.decode("utf-8")
    else:
        s = str(raw)
    j = json.loads(s)
    q = encode_sentence_np(j.get("query", ""))
    d = encode_sentence_np(j.get("document", ""))
    n = encode_sentence_np(j.get("hard_negative", ""))
    return q, d, n

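# Each line of dataset_shuffled.jsonl is expected to be a JSON object with the
# keys read above. A hypothetical record (values are illustrative only):
#
#   {"query": "how do I reset my password?",
#    "document": "To reset your password, open Settings and choose ...",
#    "hard_negative": "Password strength requirements are listed in ..."}
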
def parse_line(line):
    q, d, n = tf.py_function(_py_encode_line, [line], [tf.int32, tf.int32, tf.int32])
    q.set_shape([MAX_LEN]); d.set_shape([MAX_LEN]); n.set_shape([MAX_LEN])
    return q, d, n

ds = tf.data.TextLineDataset(DATA_PATH)
ds = ds.map(lambda x: tf.strings.strip(x), num_parallel_calls=AUTOTUNE)
ds = ds.filter(lambda x: tf.not_equal(x, ""))
# Shuffle the raw lines before the (expensive) tokenization map so the shuffle
# buffer holds strings rather than padded token arrays.
ds = ds.shuffle(SHUFFLE_BUFFER, seed=SEED)
ds = ds.map(parse_line, num_parallel_calls=AUTOTUNE)
ds = ds.repeat()
ds = ds.batch(BATCH_SIZE, drop_remainder=True)
ds = ds.prefetch(AUTOTUNE)

# Pull one batch to verify the pipeline produces the expected shapes.
try:
    sample = next(iter(ds.take(1)))
    print("Sample batch shapes:", [t.shape for t in sample])
except Exception as e:
    print("Warning: sample extraction failed:", e)

@tf.function
def compute_loss_and_logits(q_emb, p_emb, n_emb, temperature):
    # InfoNCE loss: each query is scored against every positive and every hard
    # negative in the batch; the matching positive for query i is column i.
    docs = tf.concat([p_emb, n_emb], axis=0)
    logits = tf.matmul(q_emb, docs, transpose_b=True)
    logits = logits / tf.cast(temperature, logits.dtype)
    labels = tf.range(tf.shape(q_emb)[0], dtype=tf.int32)
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
    return tf.reduce_mean(loss), logits

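# A small sanity check for the loss shapes (a sketch, not called during training;
# invoke it manually if desired). With B queries, logits are [B, 2*B] and the
# matching positive for query i sits in column i.
def _loss_sanity_check(batch=4):
    q = tf.math.l2_normalize(tf.random.normal([batch, LATENT_DIM]), axis=1)
    p = tf.math.l2_normalize(tf.random.normal([batch, LATENT_DIM]), axis=1)
    n = tf.math.l2_normalize(tf.random.normal([batch, LATENT_DIM]), axis=1)
    loss, logits = compute_loss_and_logits(q, p, n, TEMPERATURE)
    print("sanity loss:", float(loss), "logits shape:", logits.shape)  # (batch, 2*batch)
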
@tf.function
def train_step(q_batch, p_batch, n_batch):
    def step_fn(q, p, n):
        with tf.GradientTape() as tape:
            q_emb = encoder(q, training=True)
            p_emb = encoder(p, training=True)
            n_emb = encoder(n, training=True)
            loss, _ = compute_loss_and_logits(q_emb, p_emb, n_emb, TEMPERATURE)
            reg_loss = tf.add_n(encoder.losses) if encoder.losses else 0.0
            total_loss = loss + reg_loss
        grads = tape.gradient(total_loss, trainable_vars)
        # Replace missing gradients with zeros so apply_gradients never sees None.
        grads = [tf.zeros_like(v) if g is None else g for g, v in zip(grads, trainable_vars)]
        optimizer.apply_gradients(zip(grads, trainable_vars))
        return total_loss

    # Note: the batch is passed to strategy.run as-is, so every replica sees the
    # same full batch unless the dataset is explicitly distributed.
    per_replica_loss = strategy.run(step_fn, args=(q_batch, p_batch, n_batch))
    return strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=None)

# Estimate steps per epoch from the number of lines in the data file.
with open(DATA_PATH, "r", encoding="utf-8") as f:
    num_lines = sum(1 for _ in f)
steps_per_epoch = max(1, num_lines // BATCH_SIZE)
print("num_lines:", num_lines, "steps_per_epoch:", steps_per_epoch)

it = iter(ds)
global_step = 0
for epoch in range(EPOCHS):
    print(f"\nEpoch {epoch+1}/{EPOCHS}")
    pbar = tqdm(range(steps_per_epoch), desc="training", ncols=120)
    for step in pbar:
        batch = next(it)
        loss = train_step(batch[0], batch[1], batch[2])
        global_step += 1
        pbar.set_postfix({"loss": f"{float(loss.numpy()):.4f}"})
    # Save a checkpoint at the end of every epoch.
    encoder.save_weights(MODEL_PATH)
    print("Saved weights:", MODEL_PATH)

print("Training finished.")