Does not run on a Colab T4 (CUDA out of memory)

#1
by asdgad

from transformers import AutoProcessor, AutoModelForImageTextToText, BitsAndBytesConfig
import torch

# Clear the CUDA cache

if torch.cuda.is_available():
    torch.cuda.empty_cache()
    print("CUDA cache cleared.")
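# Note: empty_cache() only returns blocks cached by this process to the driver;
# it does not lower the peak memory the forward pass below will request.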

messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "url": "https://raw.githubusercontent.com/zheny2751-dotcom/UI2Code-N/main/assets/example.png"
            },
            {
                "type": "text",
                "text": "Based on the domtree and the page screenshot, please identify which interactive components in the image require interaction. Please note that if similar buttons have been clicked on similar pages in the past, do not click them again, and also do not select buttons that are obscured on the page."
            }
        ]
    }
]
processor = AutoProcessor.from_pretrained("zai-org/WebVIA-Agent", use_fast=True)

# Define 4-bit quantization settings

quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True
)
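# NF4 stores weights at roughly 0.5 bytes per parameter (vs. 2 bytes in bf16),
# so weight memory drops about 4x; activations and attention still run in bf16,
# which is where the OOM below actually occurs.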

model = AutoModelForImageTextToText.from_pretrained(
    pretrained_model_name_or_path="zai-org/WebVIA-Agent",
    dtype=torch.bfloat16,  # renamed from torch_dtype to dtype
    device_map="auto",  # "auto" lets accelerate place layers across available devices
    quantization_config=quantization_config  # apply the BitsAndBytesConfig defined above
)
inputs = processor.apply_chat_template(
    messages,
    tokenize=True,
    add_generation_prompt=True,
    return_dict=True,
    return_tensors="pt"
).to(model.device)
generated_ids = model.generate(**inputs, max_new_tokens=1)
output_text = processor.decode(generated_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=False)
print(output_text)

CUDA cache cleared.

Loading checkpoint shards: 100% 4/4 [01:46<00:00, 27.24s/it]


OutOfMemoryError Traceback (most recent call last)

/tmp/ipython-input-955071952.py in <cell line: 0>()
45 return_tensors="pt"
46 ).to(model.device)
---> 47 generated_ids = model.generate(**inputs, max_new_tokens=1)
48 output_text = processor.decode(generated_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=False)
49 print(output_text)

22 frames

/usr/local/lib/python3.12/dist-packages/transformers/integrations/sdpa_attention.py in sdpa_attention_forward(module, query, key, value, attention_mask, dropout, scaling, is_causal, **kwargs)
94 attention_mask = torch.logical_not(attention_mask.bool()).to(query.device)
95
---> 96 attn_output = torch.nn.functional.scaled_dot_product_attention(
97 query,
98 key,

OutOfMemoryError: CUDA out of memory. Tried to allocate 26.20 GiB. GPU 0 has a total capacity of 14.74 GiB of which 6.29 GiB is free. Process 211940 has 8.45 GiB memory in use. Of the allocated memory 8.15 GiB is allocated by PyTorch, and 181.70 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
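The failing allocation happens inside scaled_dot_product_attention, so the 26.20 GiB request comes from the attention matrices over a very long image-plus-text sequence, not from the (already 4-bit) weights. A minimal sketch of two mitigations, neither verified on WebVIA-Agent: set the allocator flag the error message itself suggests before CUDA is initialized, and downscale the screenshot before it is tokenized, on the assumption that WebVIA-Agent, like most recent vision-language models, emits more image tokens for larger images. Passing a PIL image under the "image" key is also an assumption; if your transformers version only accepts "url" or "path", save the resized file and reference it instead.

import os
# Must run before torch initializes CUDA; suggested by the error message itself.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

import requests
import torch
from PIL import Image

# Assumption: fewer pixels -> fewer image tokens -> quadratically smaller
# attention matrices inside scaled_dot_product_attention.
url = "https://raw.githubusercontent.com/zheny2751-dotcom/UI2Code-N/main/assets/example.png"
image = Image.open(requests.get(url, stream=True).raw)
image.thumbnail((1024, 1024))  # cap the longer side at 1024 px, keeping aspect ratio

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": image},  # key is an assumption; see note above
            {"type": "text", "text": "Based on the domtree and the page screenshot, ..."}
        ]
    }
]

If it still OOMs after these, the T4's 14.74 GiB may simply be too small for this input: the log shows 8.45 GiB already in use for the quantized model alone before generation starts.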
