'''
* The Recognize Anything Model (RAM) inference on unseen classes
* Written by Xinyu Huang
'''
import argparse
import numpy as np
import torch
from PIL import Image
from ram.models import ram
from ram import inference_ram_openset as inference
from ram import get_transform
from ram.utils import build_openset_label_embedding
from torch import nn

parser = argparse.ArgumentParser(
    description='RAM open-set inference for image tagging')
parser.add_argument('--image',
                    metavar='DIR',
                    help='path to the input image',
                    default='images/openset_example.jpg')
parser.add_argument('--pretrained',
                    metavar='DIR',
                    help='path to pretrained model',
                    default='pretrained/ram_swin_large_14m.pth')
parser.add_argument('--image-size',
                    default=384,
                    type=int,
                    metavar='N',
                    help='input image size (default: 384)')
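
# Example invocation (the paths below are simply the script defaults; the Swin-L
# RAM checkpoint must be downloaded separately, see the repository README):
#
#     python inference_ram_openset.py \
#         --image images/openset_example.jpg \
#         --pretrained pretrained/ram_swin_large_14m.pth \
#         --image-size 384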

if __name__ == "__main__":

    args = parser.parse_args()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    transform = get_transform(image_size=args.image_size)

    ####### load model
    model = ram(pretrained=args.pretrained,
                image_size=args.image_size,
                vit='swin_l')

    ####### set up open-set inference: swap the model's built-in tag list for
    ####### text embeddings of the unseen (open-set) categories
    openset_label_embedding, openset_categories = build_openset_label_embedding()

    model.tag_list = np.array(openset_categories)
    model.label_embed = nn.Parameter(openset_label_embedding.float())
    model.num_class = len(openset_categories)
    # the threshold for unseen categories is often lower than for seen ones
    model.class_threshold = torch.ones(model.num_class) * 0.5
    #######
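
    # A hedged aside, not part of the original script: class_threshold is a
    # per-class tensor, so individual categories could be given their own
    # cut-offs instead of a flat 0.5. A minimal sketch (the tag name and
    # values are made up for illustration):
    #
    #     overrides = {'sculpture': 0.4}
    #     for i, tag in enumerate(openset_categories):
    #         model.class_threshold[i] = overrides.get(tag, 0.5)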

    model.eval()
    model = model.to(device)

    image = transform(Image.open(args.image)).unsqueeze(0).to(device)

    res = inference(image, model)
    print("Image Tags: ", res)