Datasets:
Upload 3 files
- .gitattributes +1 -0
- data/split_data.py +63 -0
- data/train_annfiles.json +3 -0
- data/val_annfiles.json +0 -0
.gitattributes CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+data/train_annfiles.json filter=lfs diff=lfs merge=lfs -text
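The added rule makes Git LFS store data/train_annfiles.json as a small pointer file instead of committing the 20 MB JSON directly (lines of this form are what git lfs track writes). As a rough self-check, assuming fnmatch is close enough to gitattributes pattern matching for these simple rules, and with a helper name of our own:

# Rough check that a path falls under an LFS rule in .gitattributes.
# fnmatch is only an approximation of gitattributes pattern semantics.
from fnmatch import fnmatch

def lfs_tracked(path, attributes_file='.gitattributes'):
    for line in open(attributes_file, encoding='utf-8'):
        parts = line.split()
        if len(parts) >= 2 and 'filter=lfs' in parts[1:] and fnmatch(path, parts[0]):
            return True
    return False

print(lfs_tracked('data/train_annfiles.json'))  # True once this rule is in place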
data/split_data.py ADDED

# Split the MSVQA training annotations into sequential learning tasks,
# writing one JSON annotation file per step, grouped by scenario.
import os, json, argparse
import numpy as np

# The four capture scenarios (the "perspective" field) annotated in the dataset.
datasets_type = ["Underwater", "Low_altitude", "Indoor", "High_altitude"]


def setup_parser():
    parser = argparse.ArgumentParser(description='Split the MSVQA datasets')
    parser.add_argument('--steps', type=int, default=10, help='Total number of steps (>=4)')
    # argparse's type=bool treats every non-empty string (even "False") as True,
    # so parse the flag strings explicitly.
    str2bool = lambda s: str(s).lower() in ('true', '1', 'yes')
    parser.add_argument('--all', type=str2bool, default=True, help='Learn all scenarios in the first step')
    parser.add_argument('--shuffle', type=str2bool, default=True, help='Shuffle the order of learning scenarios.')
    parser.add_argument('--random_seed', type=int, default=1993)
    parser.add_argument('--train_anns_pth', type=str, default='./train_annfiles.json')
    parser.add_argument('--save_pth', type=str, default='./workspace')
    return parser


def load_json(p): return json.load(open(p, 'r', encoding='utf-8'))


def save_json(data_list, save_root, steps_num):
    # One annotation file per task: <steps>-steps-<task index>.json
    if not os.path.exists(save_root): os.mkdir(save_root)
    for i, task_data in enumerate(data_list, 1):
        json.dump(task_data, open(os.path.join(save_root, f"{steps_num}-steps-{i}.json"), "w"), indent=4)


def split_datasets(img_data, ann_data, split_num, seed):
    # Shuffle the image ids, cut them into split_num chunks (the first chunk
    # absorbs the remainder), and let each annotation follow its image.
    np.random.seed(seed)
    np.random.shuffle(img_data)
    img_num, split_single = len(img_data), len(img_data) // split_num
    init_split = img_num - split_single * (split_num - 1)
    return [[ann for ann in ann_data if
             ann["image"] in img_data[init_split + split_single * (i - 1): init_split + split_single * i]] if i else
            [ann for ann in ann_data if ann["image"] in img_data[:init_split]] for i in range(split_num)]


if __name__ == "__main__":
    args = setup_parser().parse_args()
    if args.steps < 4: raise ValueError("Unsupported step size!")
    train_data = load_json(args.train_anns_pth)

    # Bucket the annotations and their unique image ids by scenario.
    data, data_img = {}, {}
    for t in datasets_type: data[t], data_img[t] = [], []
    for ann in train_data:
        p = ann["perspective"]
        data[p].append(ann)
        if ann["image"] not in data_img[p]: data_img[p].append(ann["image"])

    # Remainder steps (steps mod 4, less one when --all claims the joint first
    # step) go to the leading scenarios; with --all, every scenario also gets
    # one extra split that feeds the joint first task.
    omit_steps = args.steps - 4 * (args.steps // 4) - (1 if args.all else 0)
    for dataset in datasets_type:
        split_num = args.steps // 4 + (1 if omit_steps > 0 else 0) + (1 if args.all else 0)
        if omit_steps > 0: omit_steps -= 1
        data[dataset] = split_datasets(data_img[dataset], data[dataset], split_num, args.random_seed)

    if args.all:
        # The first task mixes the first split of every scenario; the remaining
        # tasks each stay within a single scenario.
        init_task = sum([data[t][0] for t in datasets_type], [])
        sub_tasks = sum([data[t][1:] for t in datasets_type], [])
        if args.shuffle: np.random.seed(args.random_seed); np.random.shuffle(sub_tasks)
        task_data = [init_task, *sub_tasks]
    else:
        sub_tasks = sum([data[t] for t in datasets_type], [])
        if args.shuffle: np.random.seed(args.random_seed); np.random.shuffle(sub_tasks)
        task_data = sub_tasks
    save_json(task_data, args.save_pth, args.steps)
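With the defaults, running python split_data.py from data/ (where the default --train_anns_pth resolves) writes ./workspace/10-steps-1.json through 10-steps-10.json: steps=10 with --all gives omit_steps = 10 mod 4 - 1 = 1, so the four scenarios are cut into 4, 3, 3, 3 splits, the first split of each scenario forms the joint first task, and the remaining 3 + 2 + 2 + 2 = 9 splits become the scenario-specific tasks. The snippet below is a minimal smoke test of split_datasets on synthetic records; only the "image" and "perspective" keys are taken from the script, while the ids and counts are made up.

# Smoke test for split_datasets on synthetic annotations (the real records in
# train_annfiles.json carry more fields than the two keys used here).
from split_data import split_datasets

imgs = [f"img_{k}" for k in range(10)]
anns = [{"image": img, "perspective": "Indoor"} for img in imgs for _ in range(2)]  # two anns per image

# Pass a copy: split_datasets shuffles the image list in place.
splits = split_datasets(list(imgs), anns, split_num=3, seed=1993)
print([len(s) for s in splits])   # [8, 6, 6] -- the first chunk absorbs the remainder
assert sum(len(s) for s in splits) == len(anns)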
data/train_annfiles.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:676c33bc7fe62639dd2674c5659c7ebd8bbb09d643403457ee67d7f8a10474bf
+size 20739872
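Only this three-line pointer is committed; Git LFS stores the actual ~20 MB JSON separately and resolves the pointer on checkout. As a sketch (the helper name and example paths are ours, not part of the commit), a downloaded copy can be checked against the pointer's oid and size like this:

# Check a downloaded file against a git-lfs pointer file (version/oid/size lines).
import hashlib, os

def matches_pointer(pointer_path, file_path):
    fields = dict(line.split(' ', 1) for line in
                  open(pointer_path, encoding='utf-8').read().splitlines() if ' ' in line)
    expected_oid = fields['oid'].split(':', 1)[1]   # strip the "sha256:" prefix
    actual_oid = hashlib.sha256(open(file_path, 'rb').read()).hexdigest()
    return actual_oid == expected_oid and os.path.getsize(file_path) == int(fields['size'])

# e.g. matches_pointer('train_annfiles.json.pointer', 'train_annfiles.json')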
data/val_annfiles.json ADDED
The diff for this file is too large to render; see the raw diff.