from transformers import Qwen3VLForConditionalGeneration, AutoProcessor, BitsAndBytesConfig
import torch
import gc

cache_dir = "./model_cache"

print("Freeing up memory...")
torch.cuda.empty_cache()
gc.collect()

model_kwargs = dict(
    # attn_implementation="eager",  # Use "flash_attention_2" when running on Ampere or newer GPUs
    torch_dtype=torch.bfloat16,  # Torch dtype to load the weights in; defaults to "auto"
    device_map="auto",  # Let accelerate decide how to place the model across devices
)

# BitsAndBytesConfig int-4 config
model_kwargs["quantization_config"] = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=model_kwargs["torch_dtype"],
    bnb_4bit_quant_storage=model_kwargs["torch_dtype"],
)

# Default: load the model on the available device(s)
model = Qwen3VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen3-VL-2B-Instruct", cache_dir=cache_dir, **model_kwargs
)

# We recommend enabling flash_attention_2 for better acceleration and memory saving,
# especially in multi-image and video scenarios.
# model = Qwen3VLForConditionalGeneration.from_pretrained(
#     "Qwen/Qwen3-VL-4B-Instruct",
#     dtype=torch.bfloat16,
#     attn_implementation="flash_attention_2",
#     device_map="auto",
# )

processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-2B-Instruct", cache_dir=cache_dir)

messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg",
            },
            {"type": "text", "text": "Describe this image."},
        ],
    }
]

# Preparation for inference: the chat template handles the image URL directly
# (note that no PIL image is passed here, unlike the gemma3 example).
inputs = processor.apply_chat_template(
    messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt"
)
inputs = inputs.to(model.device)

# Inference: generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=128)
# Strip the prompt tokens so only the newly generated tokens are decoded
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
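
# Optional diagnostic (illustrative sketch): a rough size check to confirm the
# 4-bit quantized weights fit the GPU budget. get_memory_footprint() is a standard
# transformers PreTrainedModel method; max_memory_allocated() reports peak CUDA usage.
print(f"Model memory footprint: {model.get_memory_footprint() / 1e9:.2f} GB")
if torch.cuda.is_available():
    print(f"Peak CUDA memory allocated: {torch.cuda.max_memory_allocated() / 1e9:.2f} GB")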