Spaces:
Sleeping
Sleeping
Move to cuda
Browse files
app.py
CHANGED
|
@@ -4,7 +4,7 @@ import spaces
|
|
| 4 |
from standalone_velvet import setup_models
|
| 5 |
|
| 6 |
models_dict = setup_models("visual_bloom.torch")
|
| 7 |
-visual_bloom = models_dict["visual_bloom"]
|
| 8 |
tokenizer = models_dict["tokenizer"]
|
| 9 |
image_feature_collator = models_dict["image_feature_collator"]
|
| 10 |
|
|
@@ -14,10 +14,10 @@ def run_inference(text_input, image_input):
|
|
| 14 |
image_features, image_attentions = image_feature_collator([image_input])
|
| 15 |
instruction_inputs = tokenizer([text_input], return_tensors="pt")
|
| 16 |
language_output = visual_bloom.generate(
|
| 17 |
-image_features,
|
| 18 |
-image_attentions,
|
| 19 |
-instruction_inputs["input_ids"],
|
| 20 |
-instruction_inputs["attention_mask"],
|
| 21 |
)
|
| 22 |
|
| 23 |
human_output = tokenizer.decode(language_output[0], skip_special_tokens=True)
|
|
|
|
| 4 |
from standalone_velvet import setup_models
|
| 5 |
|
| 6 |
models_dict = setup_models("visual_bloom.torch")
|
| 7 |
+visual_bloom = models_dict["visual_bloom"].to('cuda')
|
| 8 |
tokenizer = models_dict["tokenizer"]
|
| 9 |
image_feature_collator = models_dict["image_feature_collator"]
|
| 10 |
|
|
|
|
| 14 |
image_features, image_attentions = image_feature_collator([image_input])
|
| 15 |
instruction_inputs = tokenizer([text_input], return_tensors="pt")
|
| 16 |
language_output = visual_bloom.generate(
|
| 17 |
+image_features.to('cuda'),
|
| 18 |
+image_attentions.to('cuda'),
|
| 19 |
+instruction_inputs["input_ids"].to('cuda'),
|
| 20 |
+instruction_inputs["attention_mask"].to('cuda'),
|
| 21 |
)
|
| 22 |
|
| 23 |
human_output = tokenizer.decode(language_output[0], skip_special_tokens=True)
|