ehartford committed on
Commit
27b70f5
·
verified ·
1 Parent(s): 4805182

Update test.py

Browse files
Files changed (1) hide show
  1. test.py +20 -6
test.py CHANGED
@@ -1,10 +1,11 @@
1
- from transformers import AutoModelForVision2Seq, AutoProcessor
2
 
3
- model = AutoModelForVision2Seq.from_pretrained(
4
  "QuixiAI/Prisma-VL-8B",
5
- torch_dtype="auto",
6
  device_map="auto"
7
  )
 
8
  processor = AutoProcessor.from_pretrained("QuixiAI/Prisma-VL-8B")
9
 
10
  messages = [
@@ -15,10 +16,17 @@ messages = [
15
  "type": "image",
16
  "image": "https://static.wikia.nocookie.net/essentialsdocs/images/7/70/Battle.png/revision/latest?cb=20220523172438",
17
  },
18
- {"type": "text", "text": "Describe your thoughts and your experience of thinking. The phenomenology is more important than the actual answer."},
 
 
 
 
 
 
19
  ],
20
  }
21
  ]
 
22
  inputs = processor.apply_chat_template(
23
  messages,
24
  tokenize=True,
@@ -26,12 +34,18 @@ inputs = processor.apply_chat_template(
26
  return_dict=True,
27
  return_tensors="pt"
28
  )
 
29
  inputs = inputs.to(model.device)
 
30
  generated_ids = model.generate(**inputs, max_new_tokens=1280)
31
  generated_ids_trimmed = [
32
- out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
33
  ]
 
34
  output_text = processor.batch_decode(
35
- generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
 
 
36
  )
 
37
  print(output_text)
 
1
+ from transformers import AutoModelForImageTextToText, AutoProcessor
2
 
3
+ model = AutoModelForImageTextToText.from_pretrained(
4
  "QuixiAI/Prisma-VL-8B",
5
+ dtype="auto",
6
  device_map="auto"
7
  )
8
+
9
  processor = AutoProcessor.from_pretrained("QuixiAI/Prisma-VL-8B")
10
 
11
  messages = [
 
16
  "type": "image",
17
  "image": "https://static.wikia.nocookie.net/essentialsdocs/images/7/70/Battle.png/revision/latest?cb=20220523172438",
18
  },
19
+ {
20
+ "type": "text",
21
+ "text": (
22
+ "Describe your thoughts and your experience of thinking. "
23
+ "The phenomenology is more important than the actual answer."
24
+ ),
25
+ },
26
  ],
27
  }
28
  ]
29
+
30
  inputs = processor.apply_chat_template(
31
  messages,
32
  tokenize=True,
 
34
  return_dict=True,
35
  return_tensors="pt"
36
  )
37
+
38
  inputs = inputs.to(model.device)
39
+
40
  generated_ids = model.generate(**inputs, max_new_tokens=1280)
41
  generated_ids_trimmed = [
42
+ out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
43
  ]
44
+
45
  output_text = processor.batch_decode(
46
+ generated_ids_trimmed,
47
+ skip_special_tokens=True,
48
+ clean_up_tokenization_spaces=False
49
  )
50
+
51
  print(output_text)