import argparse
import glob
import json

from PIL import Image
from tqdm import tqdm
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info

def read_json(file_path):
    with open(file_path, 'r', encoding='utf-8') as file:
        data = json.load(file)
    return data

def write_json(file_path, data):
    with open(file_path, 'w', encoding='utf-8') as file:
        json.dump(data, file, ensure_ascii=False, indent=4)
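# Note: write_json rewrites the whole file on each call; the loop below uses
# it to checkpoint partial results after every batch.
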
# default: Load the model on the available device(s)
model_path = '/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/ICCV_2025/qvq/models/QVQ-72B-Preview'
model = Qwen2VLForConditionalGeneration.from_pretrained(
    model_path, torch_dtype="auto", device_map="auto"
)
# default processor
processor = AutoProcessor.from_pretrained(model_path)
# The default range for the number of visual tokens per image in the model is 4-16384. You can set min_pixels and max_pixels according to your needs, such as a token count range of 256-1280, to balance speed and memory usage.
# min_pixels = 256*28*28
# max_pixels = 1280*28*28
#processor = AutoProcessor.from_pretrained("Qwen/QVQ-72B-Preview", min_pixels=min_pixels, max_pixels=max_pixels)
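# Each visual token corresponds to a 28x28-pixel patch (14x14 ViT patches
# merged 2x2), so pixel budgets of 256*28*28 and 1280*28*28 map to roughly
# 256 and 1280 tokens per image, respectively.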
parser = argparse.ArgumentParser(description="Process a dataset with a specific index range.")
parser.add_argument("--batch_size", type=int, default=1, help="batch size")
#parser.add_argument("--index", type=int, default = 0,help="index")
args = parser.parse_args()
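# Example invocation (script name is hypothetical):
#   python run_qvq_inference.py --batch_size 2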
folder = "/inspire/hdd/ws-ba572160-47f8-4ca1-984e-d6bcdeb95dbb/a100-maybe/albus/ICCV_2025/qvq/dataset"
images = []
# Sort for a deterministic ordering; glob returns files in arbitrary order.
# Assumption: the dataset images are JPEGs (the original pattern was "*.jpe").
for img_path in sorted(glob.glob(f"{folder}/*.jpeg")):
    img = Image.open(img_path)
    images.append(img)
num_image = len(images)
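# PIL opens files lazily: pixel data is only read when the processor consumes
# an image, so holding every handle in memory is cheap for moderate datasets.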
print(f"beigin : {begin}, end : {end}, batch_size : {batch_size}")
begin, end, batch_size= 0, 10, args.batch_size
messages = [
    {
        "role": "system",
        "content": [
            {"type": "text", "text": "You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step."}
        ],
    },
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/QVQ/demo.png",
            },
            {"type": "text", "text": "Please describe in detail the content of the picture."},
        ],
    },
]
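# The demo URL above is only a placeholder; the loop below overwrites
# messages[1]["content"][0]["image"] with a local PIL image for each sample.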
# Preparation for inference
ans = []
counter = 0
for batch_idx in tqdm(range(begin, end, batch_size)):
    batch = data[batch_idx: min(batch_idx + batch_size, end)]
    print(f"data index range : {batch_idx} ~ {min(batch_idx + batch_size, end)}")
    texts = []
    image_inputs_batch, video_inputs_batch = [], []
    for idx, i in enumerate(batch):
        img = images[i]
        print(f"loaded image {i} successfully")
        # Swap the placeholder image in the prompt for the current sample.
        messages[1]["content"][0]["image"] = img
        text = processor.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
        texts.append(text)
        image_inputs, video_inputs = process_vision_info(messages)
        if image_inputs:
            image_inputs_batch.extend(image_inputs)
        if video_inputs:
            video_inputs_batch.extend(video_inputs)
    # One prompt per sample; the processor pads them into a single batch.
    inputs = processor(
        text=texts,
        images=image_inputs_batch or None,
        videos=video_inputs_batch or None,
        padding=True,
        return_tensors="pt",
    )
    inputs = inputs.to("cuda")
    # Inference: generation of the output
    generated_ids = model.generate(**inputs, max_new_tokens=8192)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    ans.append(output_text)
    save_path = "output.json"
    counter = counter + 1
    # Checkpoint every batch (counter % 1 is always 0); raise the modulus to
    # save less often.
    if counter % 1 == 0:
        print(f"Saving data at iteration {batch_idx + 1}")
        write_json(save_path, ans)
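# After the run, output.json contains one list of decoded strings per batch
# (the structure produced by processor.batch_decode above).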