import torch
from transformers import AutoProcessor, AutoModelForImageTextToText, BitsAndBytesConfig
import datasets
from datasets import Dataset
from typing import cast
import os
import shutil
import multiprocessing as mp
from PIL import Image


def load_model(model_name, device_id=0):
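    """Load the processor and a 4-bit NF4-quantized model onto a single GPU.

    Double quantization trims memory further; compute runs in bfloat16 and
    attention uses FlashAttention 2.
    """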
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_compute_dtype=torch.bfloat16,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=True,
    )

    processor = AutoProcessor.from_pretrained(model_name)
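    # Decoder-only models need left padding for correct batched generation.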
    processor.tokenizer.padding_side = "left"

    model = AutoModelForImageTextToText.from_pretrained(
        model_name,
        quantization_config=bnb_config,
        dtype=torch.bfloat16,
        device_map={"": device_id},
        attn_implementation="flash_attention_2",
    )

    return processor, model


def get_template(processor):
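    """Build the chat-formatted captioning prompt with an image placeholder."""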
    msg = [
        {
            "role": "user",
            "content": [
                {"type": "image"},
                {
                    "type": "text",
                    "text": "Describe the image concisely, and skip mentioning that it's illustrated or from anime.",
                },
            ],
        }
    ]

    return processor.apply_chat_template(
        msg, add_generation_prompt=True, tokenize=False
    )


def preprocess_example_batch(examples, text):
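    """Convert every image in the batch to RGB and pair it with the prompt."""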
    processed_images = []

    for image in examples["image"]:
        if isinstance(image, Image.Image):
            if image.mode != "RGB":
                image = image.convert("RGB")
            processed_images.append(image)
        else:
            raise ValueError("Image must be a PIL Image")

    return {
        "image": processed_images,
        "text": [text] * len(processed_images),
    }


def run_preprocessing(input_dataset, output_dir, num_proc=32, batch_size=100, start_idx=0, end_idx=None):
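    """Preprocess a slice of the source dataset in parallel and save it to disk.

    Only the "image" and "text" columns needed by the caption workers are kept.
    """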
    print("Loading dataset for preprocessing...")
    ds = datasets.load_dataset(input_dataset, split="train")
    
    if end_idx is None:
        end_idx = len(ds)
    
    print(f"Selecting range [{start_idx}:{end_idx}]...")
    ds = ds.select(range(start_idx, end_idx))

    print("Loading processor...")
    processor = AutoProcessor.from_pretrained("datalab-to/chandra")
    text = get_template(processor)

    print("Running preprocessing...")
    processed_ds = ds.map(
        lambda ex: preprocess_example_batch(ex, text),
        remove_columns=[col for col in ds.column_names if col not in ["image", "text"]],
        num_proc=num_proc,
        batched=True,
        batch_size=batch_size,
    )

    print(f"Saving preprocessed dataset to {output_dir}...")
    processed_ds.save_to_disk(output_dir)
    print("Preprocessing done.")


def caption_batch(batch, processor, model):
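    """Run batched greedy generation and return one cleaned caption per image."""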
    images = batch["image"]
    texts = batch["text"]

    inputs = processor(text=texts, images=images, return_tensors="pt", padding=True)

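    # Pin host memory so the device copies can run asynchronously;
    # non_blocking=True only overlaps transfers when the source is pinned.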
    inputs = {
        k: v.pin_memory().to(model.device, non_blocking=True) for k, v in inputs.items()
    }

    with torch.no_grad(), torch.amp.autocast("cuda", dtype=torch.bfloat16):
        generated = model.generate(
            **inputs,
            max_new_tokens=128,
            do_sample=False,
        )

    decoded = processor.batch_decode(generated, skip_special_tokens=False)
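    # Special tokens are kept through decoding so the assistant marker below
    # survives; everything before it is the echoed prompt.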

    captions = []
    special_tokens = set(processor.tokenizer.all_special_tokens)
    for d in decoded:
        if "<|im_start|>assistant" in d:
            d = d.split("<|im_start|>assistant")[-1]

        for token in special_tokens:
            d = d.replace(token, "")

        d = d.strip()
        captions.append(d)

    return {
        "text": captions,
    }


def process_shard(
    gpu_id, start, end, model_name, batch_size, dataset_path, output_file
):
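    """Caption one contiguous slice of the preprocessed dataset on a single GPU."""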
    try:
        torch.cuda.set_device(gpu_id)

        print(f"[GPU {gpu_id}] Loading model...", flush=True)
        processor, model = load_model(model_name, gpu_id)

        print(f"[GPU {gpu_id}] Loading data shard [{start}:{end}]...", flush=True)
        loaded = datasets.load_from_disk(dataset_path).select(range(start, end))

        shard = cast(Dataset, loaded)

        print(f"[GPU {gpu_id}] Processing {len(shard)} examples...", flush=True)
        result = shard.map(
            lambda batch: caption_batch(batch, processor, model),
            batched=True,
            batch_size=batch_size,
            remove_columns=["text"],
        )

        print(f"[GPU {gpu_id}] Saving results to {output_file}...", flush=True)
        result.save_to_disk(output_file)

        print(f"[GPU {gpu_id}] Done!", flush=True)
        return output_file
    except Exception as e:
        print(f"[GPU {gpu_id}] Error: {e}", flush=True)
        raise


def main():
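    """Run one stage of the two-stage captioning job, selected by the INIT env var.

    Stage 0 captions the first half of the dataset and pushes it to a staging
    repo; stage 1 captions the second half, concatenates both halves, and opens
    a PR against the source dataset. "spawn" gives each worker its own CUDA
    context.
    """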
    mp.set_start_method("spawn", force=True)

    init_stage = os.environ.get("INIT", "0")
    
    input_dataset = "none-yet/anime-captions"
    output_dataset = "nroggendorff/anime-captions"
    model_name = "datalab-to/chandra"
    batch_size = 20

    print(f"Running stage INIT={init_stage}")
    
    full_ds = datasets.load_dataset(input_dataset, split="train")
    total_dataset_size = len(full_ds)
    midpoint = total_dataset_size // 2
    
    if init_stage == "0":
        print(f"Stage 0: Processing first half [0:{midpoint}]")
        preprocessed_dataset = "temp_preprocessed_0"
        start_idx = 0
        end_idx = midpoint
        final_output = f"{output_dataset}_part0"
    else:
        print(f"Stage 1: Processing second half [{midpoint}:{total_dataset_size}]")
        preprocessed_dataset = "temp_preprocessed_1"
        start_idx = midpoint
        end_idx = total_dataset_size
        final_output = input_dataset

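    # Skip the expensive preprocessing pass if a saved copy already exists.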
    if not os.path.exists(preprocessed_dataset):
        run_preprocessing(input_dataset, preprocessed_dataset, start_idx=start_idx, end_idx=end_idx)

    print("Loading preprocessed dataset...")
    ds = datasets.load_from_disk(preprocessed_dataset)
    num_gpus = torch.cuda.device_count()
    if num_gpus == 0:
        raise RuntimeError("No CUDA devices found; at least one GPU is required.")
    total_size = len(ds)
    shard_size = total_size // num_gpus

    print(f"Dataset size: {total_size}")
    print(f"Using {num_gpus} GPUs")
    print(f"Shard size: {shard_size}")

    processes = []
    temp_files = []

    for i in range(num_gpus):
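        # Contiguous shard per GPU; the last shard absorbs any remainder.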
        start = i * shard_size
        end = start + shard_size if i < num_gpus - 1 else total_size
        output_file = f"temp_shard_{init_stage}_{i}"
        temp_files.append(output_file)

        p = mp.Process(
            target=process_shard,
            args=(
                i,
                start,
                end,
                model_name,
                batch_size,
                preprocessed_dataset,
                output_file,
            ),
        )
        p.start()
        processes.append(p)

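    # If any worker exited nonzero, terminate the survivors and abort rather
    # than concatenating partial results.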
    for p in processes:
        p.join()
        if p.exitcode != 0:
            print(f"\nProcess failed with exit code {p.exitcode}", flush=True)
            print("Terminating all processes...", flush=True)
            for proc in processes:
                if proc.is_alive():
                    proc.terminate()
            for proc in processes:
                proc.join()
            raise RuntimeError("At least one process failed")

    print("\nAll processes completed. Loading and concatenating results...")

    shards = [cast(Dataset, datasets.load_from_disk(f)) for f in temp_files]
    final_ds = datasets.concatenate_datasets(shards)

    print(f"Final dataset size: {len(final_ds)}")
    
    if init_stage == "0":
        print(f"Pushing first half to {final_output}...")
        final_ds.push_to_hub(final_output, create_pr=False)
    else:
        print("Loading first half from hub...")
        first_half = cast(
            Dataset, datasets.load_dataset(f"{output_dataset}_part0", split="train")
        )
        
        print("Concatenating both halves...")
        complete_ds = datasets.concatenate_datasets([first_half, final_ds])
        
        print(f"Complete dataset size: {len(complete_ds)}")
        print(f"Pushing complete dataset to {final_output} with PR...")
        complete_ds.push_to_hub(final_output, create_pr=True)

    print("Cleaning up temporary files...")
    for f in temp_files:
        if os.path.exists(f):
            shutil.rmtree(f)
    if os.path.exists(preprocessed_dataset):
        shutil.rmtree(preprocessed_dataset)

    print("Done!")


if __name__ == "__main__":
    main()