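"""Run HYDRA (HydraNoRL) over a VQA / referring-expression dataset and append
the results to a JSONL file.

Usage sketch (paths are illustrative, not taken from the repo; the HYDRA
executor must already be serving on the configured ``executor_port``):

    python main.py --data_root ./data/gqa \
        --base_config config/base.yaml \
        --model_config config/model.yaml

Each processed datum appends one line to
``<result_folder>/result_<dataset>.jsonl`` with the keys ``datum_id``,
``query``, ``ground_truth`` and ``result``. If that file already exists, the
run resumes and skips the datum ids recorded in it.
"""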
import asyncio
import json
import os
from pathlib import Path

import tensorneko_util as N
from dotenv import load_dotenv

load_dotenv()

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--data_root", type=str, required=True)
parser.add_argument("--base_config", type=str, required=True)
parser.add_argument("--model_config", type=str, required=True)
parser.add_argument("--result_folder", type=str, default="./result")
args = parser.parse_args()

# The config paths must be populated before the hydra_vl4ai imports below,
# which depend on `Config`.
from hydra_vl4ai.util.config import Config

Config.base_config_path = args.base_config
Config.model_config_path = args.model_config

from hydra_vl4ai.agent.hydra import HydraNoRL
from hydra_vl4ai.util.console import logger, console
from hydra_vl4ai.util.misc import wait_until_loaded

import exp_datasets
async def main():
    # Block until the HYDRA executor service is reachable.
    with console.status("[bold green]Connecting to HYDRA executor..."):
        wait_until_loaded(f"http://localhost:{Config.base_config['executor_port']}")

    hydra = HydraNoRL()

    match Config.base_config["dataset"]:
        case "gqa":
            dataset = exp_datasets.GQA(args.data_root)
        case "okvqa":
            dataset = exp_datasets.OKVQA(args.data_root)
        case "aokvqa":
            # TODO: Not tested yet
            # dataset = exp_datasets.AOKVQA(
            #     f"{args.data_root}/aokvqa",
            #     "val", f"{args.data_root}/coco", version="v1p0"
            # )
            raise NotImplementedError("AOKVQA is not implemented yet")
        case "refcoco" | "refcoco+":
            dataset = exp_datasets.Refcoco(args.data_root)
        case _:
            raise ValueError("Invalid dataset")

    # output path
    Path(args.result_folder).mkdir(parents=True, exist_ok=True)
    save_path = Path(args.result_folder) / f"result_{Config.base_config['dataset']}.jsonl"

    # resume from a previous run if the result file already exists
    completed = []
    if os.path.exists(save_path):
        prev_results = N.io.read.json(str(save_path))
        completed = [result["datum_id"] for result in prev_results]

    for i, (image_path, datum_id, query, ground_truth) in enumerate(dataset):
        if datum_id in completed:
            logger.info(f"Skipping {i+1}/{len(dataset)}")
            continue

        logger.info(f"Processing {i+1}/{len(dataset)}")
        with open(image_path, "rb") as f:
            image_buffer = f.read()
        result = await hydra(image_buffer, query)
        logger.info(f"Query: {query} Answer: {result}")

        # append one JSON record per datum so progress survives interruption
        with open(save_path, "a") as f:
            f.write(json.dumps({
                "datum_id": datum_id,
                "query": query,
                "ground_truth": ground_truth,
                "result": result
            }) + "\n")
            f.flush()
if __name__ == "__main__":
    asyncio.run(main())