Update README.md
README.md CHANGED
@@ -39,10 +39,17 @@ from torchvision import transforms
 import torch
 from PIL import Image
 from huggingface_hub import hf_hub_download
+import importlib.util
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-# Load model
+# Load model class
+class_path = hf_hub_download(repo_id="PerceptCLIP/PerceptCLIP_Emotions", filename="modeling.py")
+spec = importlib.util.spec_from_file_location("clip_lora_model", class_path)
+clip_lora_model = importlib.util.module_from_spec(spec)
+spec.loader.exec_module(clip_lora_model)
+
+# Load pretrained model
 model_path = hf_hub_download(repo_id="PerceptCLIP/PerceptCLIP_Emotions", filename="perceptCLIP_Emotions.pth")
 model = torch.load(model_path).to(device).eval()
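
The new `importlib` lines execute the repo's `modeling.py` to define the `clip_lora_model` module before the checkpoint is deserialized, presumably so that `torch.load` can resolve the pickled model class. For context, a minimal inference sketch follows; it continues from the updated README snippet, and the CLIP-style preprocessing, the input image path, and the assumption that the model returns emotion-class logits are illustrative guesses rather than part of this commit.

```python
# Illustrative usage sketch. `model` and `device` are assumed to be defined as
# in the README snippet above; the preprocessing and image path are assumptions.
import torch
from PIL import Image
from torchvision import transforms

# Standard CLIP image preprocessing (assumed, not specified in this commit)
preprocess = transforms.Compose([
    transforms.Resize(224),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.48145466, 0.4578275, 0.40821073),
                         std=(0.26862954, 0.26130258, 0.27577711)),
])

image = Image.open("example.jpg").convert("RGB")   # hypothetical input image
inputs = preprocess(image).unsqueeze(0).to(device)

with torch.no_grad():
    logits = model(inputs)                  # assumes the model returns class logits
    predicted_class = logits.argmax(dim=-1).item()

print(predicted_class)
```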