
Commit 21aaf5d (parent: 96a956c)
Author: root

update model url

4 files changed: +5 -5 lines

configs/caption_coco.yaml

+1 -1

@@ -3,7 +3,7 @@ ann_root: 'annotation'
 coco_gt_root: 'annotation/coco_gt'
 
 # set pretrained as a file path or an url
-pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_base_caption.pth'
+pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth'
 
 # size of vit model; base or large
 vit: 'base'

configs/nocaps.yaml

+1 -1

@@ -2,7 +2,7 @@ image_root: '/export/share/datasets/vision/nocaps/'
 ann_root: 'annotation'
 
 # set pretrained as a file path or an url
-pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_base_caption.pth'
+pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth'
 
 vit: 'base'
 batch_size: 32

configs/vqa.yaml

+1 -1

@@ -4,7 +4,7 @@ train_files: ['vqa_train','vqa_val','vg_qa']
 ann_root: 'annotation'
 
 # set pretrained as a file path or an url
-pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_vqa.pth'
+pretrained: 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
 
 # size of vit model; base or large
 vit: 'base'
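The `pretrained` field in these configs accepts either a local checkpoint path or a URL (per the comment in each file) and is handed straight to the model constructor, as in the demo.ipynb cells below. A minimal sketch of that flow, assuming the configs are read with PyYAML and that `blip_decoder` is importable from the repo's `models.blip` module:

import yaml
from models.blip import blip_decoder  # import path assumed from the repo layout

# Read the updated caption config; 'pretrained' may be a file path or a URL.
with open('configs/caption_coco.yaml') as f:
    config = yaml.safe_load(f)

# The pretrained value is passed straight through, mirroring the
# blip_decoder(pretrained=model_url, ...) call in demo.ipynb.
model = blip_decoder(pretrained=config['pretrained'],
                     image_size=384,        # same size the caption demo cell uses
                     vit=config['vit'])     # 'base' in these configs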

demo.ipynb

+2 -2

@@ -99,7 +99,7 @@
 "image_size = 384\n",
 "image = load_demo_image(image_size=image_size, device=device)\n",
 "\n",
-"model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_base_caption.pth'\n",
+"model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'\n",
 " \n",
 "model = blip_decoder(pretrained=model_url, image_size=image_size, vit='base')\n",
 "model.eval()\n",
@@ -153,7 +153,7 @@
 "image_size = 480\n",
 "image = load_demo_image(image_size=image_size, device=device) \n",
 "\n",
-"model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model*_vqa.pth'\n",
+"model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'\n",
 " \n",
 "model = blip_vqa(pretrained=model_url, image_size=image_size, vit='base')\n",
 "model.eval()\n",
