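"""Split the MSVQA training annotations into a sequence of learning steps,
writing one annotation JSON per step (see `setup_parser` for the options)."""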
import argparse
import json
import os

import numpy as np

# The four capture scenarios; every annotation's "perspective" field names one.
datasets_type = ["Underwater", "Low_altitude", "Indoor", "High_altitude"]


def setup_parser():
    parser = argparse.ArgumentParser(description='Split the MSVQA datasets')
    parser.add_argument('--steps', type=int, default=10, help='Total number of steps (>=4)')
    # argparse's `type=bool` treats any non-empty string (even "False") as True,
    # so the boolean flags use BooleanOptionalAction (Python 3.9+) instead.
    parser.add_argument('--all', action=argparse.BooleanOptionalAction, default=True,
                        help='Learn all scenarios in the first step')
    parser.add_argument('--shuffle', action=argparse.BooleanOptionalAction, default=True,
                        help='Shuffle the order of learning scenarios')
    parser.add_argument('--random_seed', type=int, default=1993)
    parser.add_argument('--train_anns_pth', type=str, default='./train_annfiles.json')
    parser.add_argument('--save_pth', type=str, default='./workspace')
    return parser


def load_json(p):
    with open(p, 'r', encoding='utf-8') as f:
        return json.load(f)


def save_json(data_list, save_root, steps_num):
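    """Write one JSON file per task: <save_root>/<steps_num>-steps-<i>.json."""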
    os.makedirs(save_root, exist_ok=True)
    for i, task_data in enumerate(data_list, 1):
        out_pth = os.path.join(save_root, f"{steps_num}-steps-{i}.json")
        with open(out_pth, "w", encoding='utf-8') as f:
            json.dump(task_data, f, indent=4)


def split_datasets(img_data, ann_data, split_num, seed):
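    """Shuffle a scenario's images and cut its annotations into `split_num`
    image-disjoint tasks; the first task absorbs the division remainder."""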
    np.random.seed(seed)
    np.random.shuffle(img_data)
    img_num, split_single = len(img_data), len(img_data) // split_num
    init_split = img_num - split_single * (split_num - 1)
    # Chunk boundaries: the first chunk holds `init_split` images, every later
    # chunk `split_single`.
    bounds = [0, init_split] + [init_split + split_single * i for i in range(1, split_num)]
    splits = []
    for i in range(split_num):
        chunk = set(img_data[bounds[i]:bounds[i + 1]])  # set lookup beats list scans
        splits.append([ann for ann in ann_data if ann["image"] in chunk])
    return splits


if __name__ == "__main__":
    args = setup_parser().parse_args()
    if args.steps < 4:
        raise ValueError("Unsupported step size: --steps must be at least 4.")

    train_data = load_json(args.train_anns_pth)

    # Group the annotations, and their unique images, by capture scenario.
    data, data_img = {}, {}
    for t in datasets_type:
        data[t], data_img[t] = [], []
    for ann in train_data:
        p = ann["perspective"]
        data[p].append(ann)
        if ann["image"] not in data_img[p]:
            data_img[p].append(ann["image"])

    # Spread the steps over the four scenarios: with --all, one step is the
    # joint first task, leaving steps - 1 per-scenario splits; the first
    # `extra` scenarios take one split more than the rest so the totals match.
    sub_steps = args.steps - (1 if args.all else 0)
    base, extra = divmod(sub_steps, len(datasets_type))
    for i, dataset in enumerate(datasets_type):
        split_num = base + (1 if i < extra else 0) + (1 if args.all else 0)
        data[dataset] = split_datasets(data_img[dataset], data[dataset], split_num, args.random_seed)

    if args.all:
        # First task: the first split of every scenario, merged.
        init_task = sum([data[t][0] for t in datasets_type], [])
        sub_tasks = sum([data[t][1:] for t in datasets_type], [])
        if args.shuffle:
            np.random.seed(args.random_seed)
            np.random.shuffle(sub_tasks)
        task_data = [init_task, *sub_tasks]
    else:
        sub_tasks = sum([data[t] for t in datasets_type], [])
        if args.shuffle:
            np.random.seed(args.random_seed)
            np.random.shuffle(sub_tasks)
        task_data = sub_tasks

    save_json(task_data, args.save_pth, args.steps)
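
    # Example with the default values (the script filename is illustrative):
    #   python split_msvqa.py --steps 10 --all --shuffle
    # writes ./workspace/10-steps-1.json ... 10-steps-10.json, where step 1
    # mixes all four scenarios and each later step covers a single scenario.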