
Commit

Merge pull request #60 from jmisilo/57-better-clip
Change CLIP version
jmisilo committed Nov 14, 2022
2 parents a9100e3 + c0605e3 commit e561112
Showing 2 changed files with 4 additions and 4 deletions.
4 changes: 2 additions & 2 deletions src/dataset_generation.py
@@ -28,8 +28,8 @@
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

 # Load CLIP model and processor
-preprocessor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
-model = CLIPModel.from_pretrained('openai/clip-vit-base-patch32').vision_model.to(device)
+preprocessor = CLIPProcessor.from_pretrained('openai/clip-vit-large-patch14')
+model = CLIPModel.from_pretrained('openai/clip-vit-large-patch14').vision_model.to(device)

 # Load dataset
 df = pd.read_csv(os.path.join(DATA_PATH, 'raw', 'results.csv'), sep='|')
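Only the model-loading lines fall inside this hunk; the feature-extraction code that follows in src/dataset_generation.py is not shown. As a minimal sketch of how the loaded processor and vision model are typically used together (the example image path and the choice of pooler_output are assumptions for illustration, not taken from the diff):

import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Same checkpoint as in the hunk above.
preprocessor = CLIPProcessor.from_pretrained('openai/clip-vit-large-patch14')
model = CLIPModel.from_pretrained('openai/clip-vit-large-patch14').vision_model.to(device)
model.eval()

# Hypothetical example image; in the script the image paths come from the CSV loaded below.
image = Image.open('example.jpg').convert('RGB')

with torch.no_grad():
    # The processor resizes and normalizes the image to the 224x224 input ViT-L/14 expects.
    inputs = preprocessor(images=image, return_tensors='pt').to(device)
    outputs = model(pixel_values=inputs['pixel_values'])

# Pooled image embedding: 1024-dim for ViT-L/14, versus 768-dim for the old ViT-B/32.
features = outputs.pooler_output.squeeze(0)
print(features.shape)  # torch.Size([1024])

Any features already generated with the old checkpoint have a different width (768 vs. 1024), so the cached dataset would need to be regenerated after this change.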
4 changes: 2 additions & 2 deletions src/model/model.py
@@ -16,8 +16,8 @@ def __init__(self, device='cpu'):

         self.device = device

-        self.preprocessor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
-        self.model = CLIPModel.from_pretrained('openai/clip-vit-base-patch32').vision_model.to(self.device)
+        self.preprocessor = CLIPProcessor.from_pretrained('openai/clip-vit-large-patch14')
+        self.model = CLIPModel.from_pretrained('openai/clip-vit-large-patch14').vision_model.to(self.device)

     def forward(self, image):
         # only one image at a time
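The body of forward sits outside this hunk; only its signature and the __init__ lines appear above. A hedged sketch of how the wrapper might run a single image through the new CLIP backbone (the class name ImageEncoder and the use of pooler_output are assumptions, not taken from the repository):

import torch.nn as nn
from transformers import CLIPModel, CLIPProcessor


class ImageEncoder(nn.Module):  # hypothetical name; the real class name is not visible in the hunk
    def __init__(self, device='cpu'):
        super().__init__()

        self.device = device

        self.preprocessor = CLIPProcessor.from_pretrained('openai/clip-vit-large-patch14')
        self.model = CLIPModel.from_pretrained('openai/clip-vit-large-patch14').vision_model.to(self.device)

    def forward(self, image):
        # only one image at a time
        inputs = self.preprocessor(images=image, return_tensors='pt').to(self.device)
        outputs = self.model(pixel_values=inputs['pixel_values'])

        # Pooled embedding: 1024-dim with ViT-L/14 (768-dim with the old ViT-B/32).
        return outputs.pooler_output

The practical consequence of the checkpoint swap is that any downstream layer sized to the CLIP hidden dimension has to change from 768 to 1024.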
