From e440931d2809175566b29e67cd9a10d7bfea714a Mon Sep 17 00:00:00 2001 From: JoeNatan30 Date: Tue, 30 Aug 2022 16:29:29 -0500 Subject: [PATCH 01/56] normalize and dinamic graph --- SL-GCN/config/sign/train/train_joint.yaml | 19 +- SL-GCN/feeders/feeder.py | 40 +++- SL-GCN/graph/sign_27.py | 33 ++- SL-GCN/main.py | 261 +++++++++++++++++++--- SL-GCN/model/decouple_gcn_attn.py | 2 +- SL-GCN/points.csv | 72 ++++++ SL-GCN/wandbFunctions.py | 61 +++++ 7 files changed, 440 insertions(+), 48 deletions(-) create mode 100644 SL-GCN/points.csv create mode 100644 SL-GCN/wandbFunctions.py diff --git a/SL-GCN/config/sign/train/train_joint.yaml b/SL-GCN/config/sign/train/train_joint.yaml index 0df2ade..c8bdefd 100644 --- a/SL-GCN/config/sign/train/train_joint.yaml +++ b/SL-GCN/config/sign/train/train_joint.yaml @@ -3,8 +3,9 @@ Experiment_name: sign_joint_final # feeder feeder: feeders.feeder.Feeder train_feeder_args: - data_path: ./data/sign/27_2/train_data_joint.npy - label_path: ./data/sign/27_2/train_label.pkl + data_path: ./data/sign/1/train_data_joint.npy + label_path: ./data/sign/1/train_label.pkl + meaning_path: ./data/sign/1/meaning.pkl debug: False random_choose: True window_size: 100 @@ -15,16 +16,18 @@ train_feeder_args: is_vector: False test_feeder_args: - data_path: ./data/sign/27_2/val_data_joint.npy - label_path: ./data/sign/27_2/val_gt.pkl + data_path: ./data/sign/1/val_data_joint.npy + label_path: ./data/sign/1/val_label.pkl + meaning_path: ./data/sign/1/meaning.pkl random_mirror: False normalization: True # model +# 226 (num classes) model: model.decouple_gcn_attn.Model model_args: - num_class: 226 - num_point: 27 + num_class: 101 #53 110 # AEC=28, PUCP=36 , WLASL=101 + num_point: 71 num_person: 1 graph: graph.sign_27.Graph groups: 16 @@ -38,11 +41,11 @@ base_lr: 0.1 step: [150, 200] # training -device: [0,1,2,3] +device: [1] keep_rate: 0.9 only_train_epoch: 1 batch_size: 64 test_batch_size: 64 num_epoch: 250 nesterov: True -warm_up_epoch: 20 \ No newline at end of file +warm_up_epoch: 20 diff --git a/SL-GCN/feeders/feeder.py b/SL-GCN/feeders/feeder.py index b4d29c9..c2715b7 100644 --- a/SL-GCN/feeders/feeder.py +++ b/SL-GCN/feeders/feeder.py @@ -7,10 +7,14 @@ sys.path.extend(['../']) from feeders import tools -flip_index = np.concatenate(([0,2,1,4,3,6,5],[17,18,19,20,21,22,23,24,25,26],[7,8,9,10,11,12,13,14,15,16]), axis=0) + +# 71 points +flip_index = np.concatenate(([0,2,1,4,3,6,5,8,7,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30],[31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],[51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70]), axis=0) + +#flip_index = np.concatenate(([0,2,1,4,3,6,5,8,7,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30],[31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],[51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70]), axis=0) class Feeder(Dataset): - def __init__(self, data_path, label_path, + def __init__(self, data_path, label_path, meaning_path, random_choose=False, random_shift=False, random_move=False, window_size=-1, normalization=False, debug=False, use_mmap=True, random_mirror=False, random_mirror_p=0.5, is_vector=False): """ @@ -29,6 +33,7 @@ def __init__(self, data_path, label_path, self.debug = debug self.data_path = data_path self.label_path = label_path + self.meaning_path = meaning_path self.random_choose = random_choose self.random_shift = random_shift self.random_move = random_move @@ -41,7 +46,6 @@ def __init__(self, data_path, label_path, self.is_vector = 
is_vector if normalization: self.get_mean_map() - print(len(self.label)) def load_data(self): # data: N C V T M @@ -63,6 +67,14 @@ def load_data(self): self.label = self.label[0:100] self.data = self.data[0:100] self.sample_name = self.sample_name[0:100] + try: + with open(self.meaning_path) as f: + self.meaning = pickle.load(f) + except: + # for pickle file from python2 + with open(self.meaning_path, 'rb') as f: + self.meaning = pickle.load(f, encoding='latin1') + def get_mean_map(self): data = self.data @@ -79,6 +91,7 @@ def __iter__(self): def __getitem__(self, index): data_numpy = self.data[index] label = self.label[index] + name = self.sample_name[index] data_numpy = np.array(data_numpy) if self.random_choose: @@ -86,31 +99,36 @@ def __getitem__(self, index): if self.random_mirror: if random.random() > self.random_mirror_p: - assert data_numpy.shape[2] == 27 + #print("dabe before random mirror", data_numpy) + assert data_numpy.shape[2] == 71 data_numpy = data_numpy[:,:,flip_index,:] if self.is_vector: data_numpy[0,:,:,:] = - data_numpy[0,:,:,:] else: - data_numpy[0,:,:,:] = 512 - data_numpy[0,:,:,:] + data_numpy[0,:,:,:] = 1 - data_numpy[0,:,:,:] + #print("dabe after random mirror", data_numpy) if self.normalization: # data_numpy = (data_numpy - self.mean_map) / self.std_map - assert data_numpy.shape[0] == 3 + assert data_numpy.shape[0] == 2 + #print("dabe before norm", data_numpy) if self.is_vector: data_numpy[0,:,0,:] = data_numpy[0,:,0,:] - data_numpy[0,:,0,0].mean(axis=0) data_numpy[1,:,0,:] = data_numpy[1,:,0,:] - data_numpy[1,:,0,0].mean(axis=0) else: data_numpy[0,:,:,:] = data_numpy[0,:,:,:] - data_numpy[0,:,0,0].mean(axis=0) data_numpy[1,:,:,:] = data_numpy[1,:,:,:] - data_numpy[1,:,0,0].mean(axis=0) - + #print("dabe after norm", data_numpy) if self.random_shift: + + #print("dabe before shift", data_numpy) if self.is_vector: data_numpy[0,:,0,:] += random.random() * 20 - 10.0 data_numpy[1,:,0,:] += random.random() * 20 - 10.0 else: - data_numpy[0,:,:,:] += random.random() * 20 - 10.0 - data_numpy[1,:,:,:] += random.random() * 20 - 10.0 - + data_numpy[0,:,:,:] += random.random()/25 #random.random() * 20 - 10.0 + data_numpy[1,:,:,:] += random.random()/25 #random.random() * 20 - 10.0 + #print("dabe after shift", data_numpy) # if self.random_shift: # data_numpy = tools.random_shift(data_numpy) @@ -120,7 +138,7 @@ def __getitem__(self, index): if self.random_move: data_numpy = tools.random_move(data_numpy) - return data_numpy, label, index + return data_numpy, label, index, name def top_k(self, score, top_k): rank = score.argsort() diff --git a/SL-GCN/graph/sign_27.py b/SL-GCN/graph/sign_27.py index 344ec61..57e3da3 100644 --- a/SL-GCN/graph/sign_27.py +++ b/SL-GCN/graph/sign_27.py @@ -2,9 +2,19 @@ sys.path.extend(['../']) from graph import tools +import pandas as pd -num_node = 27 +points = pd.read_csv("points.csv") +ori = points.origin +tar = points.tarjet + +inward_ori_index = [(o,t) for o, t in zip(ori, tar)] + +num_node = 71 +print("NUM OF NODES:", num_node) self_link = [(i, i) for i in range(num_node)] + +''' inward_ori_index = [(5, 6), (5, 7), (6, 8), (8, 10), (7, 9), (9, 11), (12,13),(12,14),(12,16),(12,18),(12,20), @@ -13,7 +23,26 @@ (24,25),(26,27),(28,29),(30,31), (10,12),(11,22)] -inward = [(i - 5, j - 5) for (i, j) in inward_ori_index] + +inward_ori_index = [(1, 2), (1, 3), (2, 4), (4, 6), (3, 5), (5, 7), + + (6, 8), + (8, 9), (9, 10), (10, 11), (11, 12), + (8, 13), (13, 14), (14, 15), (15, 16), + (8, 17), (17, 18), (18, 19), (19, 20), + (8, 21), (21, 22), (22, 23), 
(23, 24), + (8, 25), (25, 26), (26, 27), (27, 28), + + (7, 29), + (29, 30), (30, 31), (31, 32), (32, 33), + (29, 34), (34, 35), (35, 36), (36, 37), + (29, 38), (38, 39), (39, 40), (40, 41), + (29, 42), (42, 43), (43, 44), (44, 45), + (29, 46), (46, 47), (47, 48), (48, 49) + ] +''' + +inward = [(i - 1, j - 1) for (i, j) in inward_ori_index] outward = [(j, i) for (i, j) in inward] neighbor = inward + outward diff --git a/SL-GCN/main.py b/SL-GCN/main.py index 60dcb00..9da6f60 100644 --- a/SL-GCN/main.py +++ b/SL-GCN/main.py @@ -2,11 +2,14 @@ from __future__ import print_function import argparse import os +from termios import VMIN import time +from xml.dom import minicompat import numpy as np import yaml import pickle from collections import OrderedDict +import csv # torch import torch import torch.nn as nn @@ -17,9 +20,14 @@ from torch.optim.lr_scheduler import ReduceLROnPlateau, MultiStepLR import random import inspect +import torchmetrics +import matplotlib.pyplot as plt +import seaborn as sns +import pandas as pd import torch.backends.cudnn as cudnn import torch.nn.functional as F - +import wandbFunctions as wandbF +import wandb # class LabelSmoothingCrossEntropy(nn.Module): # def __init__(self): # super(LabelSmoothingCrossEntropy, self).__init__() @@ -32,12 +40,16 @@ # loss = confidence * nll_loss + smoothing * smooth_loss # return loss.mean() +wandbFlag = True + +model_name = '' + def init_seed(_): torch.cuda.manual_seed_all(1) torch.manual_seed(1) np.random.seed(1) random.seed(1) - # torch.backends.cudnn.enabled = False + #torch.backends.cudnn.enabled = False torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False @@ -192,7 +204,7 @@ class Processor(): def __init__(self, arg): - arg.model_saved_name = "./save_models/" + arg.Experiment_name + arg.model_saved_name = arg.file_name + '/' + arg.Experiment_name arg.work_dir = "./work_dir/" + arg.Experiment_name self.arg = arg self.save_arg() @@ -215,9 +227,13 @@ def __init__(self, arg): self.load_data() self.lr = self.arg.base_lr self.best_acc = 0 + self.best_tmp_acc = 0 def load_data(self): Feeder = import_class(self.arg.feeder) + ln = Feeder(**self.arg.test_feeder_args) + self.meaning = ln.meaning + #print(ln.meaning) self.data_loader = dict() if self.arg.phase == 'train': self.data_loader['train'] = torch.utils.data.DataLoader( @@ -236,13 +252,15 @@ def load_data(self): worker_init_fn=init_seed) def load_model(self): - output_device = self.arg.device[0] if type( - self.arg.device) is list else self.arg.device - self.output_device = output_device + output_device = self.arg.device[0] if type( + self.arg.device) is list else self.arg.device + self.output_device = output_device Model = import_class(self.arg.model) shutil.copy2(inspect.getfile(Model), self.arg.work_dir) self.model = Model(**self.arg.model_args).cuda(output_device) # print(self.model) + if wandbFlag: + wandbF.watch(self.model) self.loss = nn.CrossEntropyLoss().cuda(output_device) # self.loss = LabelSmoothingCrossEntropy().cuda(output_device) @@ -283,6 +301,7 @@ def load_model(self): output_device=output_device) def load_optimizer(self): + if self.arg.optimizer == 'SGD': params_dict = dict(self.model.named_parameters()) @@ -296,7 +315,14 @@ def load_optimizer(self): params += [{'params': value, 'lr': self.arg.base_lr, 'lr_mult': lr_mult, 'decay_mult': decay_mult, 'weight_decay': weight_decay}] - + wandb.config = { + "learning_rate": self.arg.base_lr, + "epochs": self.arg.num_epoch, + "batch_size": self.arg.batch_size, + 
"weight_decay":self.arg.weight_decay, + "num_class":self.arg.model_args["num_class"], + "momentum":0.9 + } self.optimizer = optim.SGD( params, momentum=0.9, @@ -306,6 +332,13 @@ def load_optimizer(self): self.model.parameters(), lr=self.arg.base_lr, weight_decay=self.arg.weight_decay) + wandb.config = { + "learning_rate": self.arg.base_lr, + "epochs": self.arg.num_epoch, + "batch_size": self.arg.batch_size, + "weight_decay":self.arg.weight_decay, + "num_class":self.arg.model_args["num_class"] + } else: raise ValueError() @@ -314,6 +347,7 @@ def load_optimizer(self): threshold=1e-4, threshold_mode='rel', cooldown=0) + def save_arg(self): # save arg arg_dict = vars(self.arg) @@ -321,10 +355,12 @@ def save_arg(self): if not os.path.exists(self.arg.work_dir): os.makedirs(self.arg.work_dir) os.makedirs(self.arg.work_dir + '/eval_results') + os.makedirs(self.arg.work_dir + '/eval_results/'+ model_name, exist_ok = True) with open('{}/config.yaml'.format(self.arg.work_dir), 'w') as f: yaml.dump(arg_dict, f) + def adjust_learning_rate(self, epoch): if self.arg.optimizer == 'SGD' or self.arg.optimizer == 'Adam': if epoch < self.arg.warm_up_epoch: @@ -338,10 +374,12 @@ def adjust_learning_rate(self, epoch): else: raise ValueError() + def print_time(self): localtime = time.asctime(time.localtime(time.time())) self.print_log("Local current time : " + localtime) + def print_log(self, str, print_time=True): if print_time: localtime = time.asctime(time.localtime(time.time())) @@ -351,22 +389,30 @@ def print_log(self, str, print_time=True): with open('{}/log.txt'.format(self.arg.work_dir), 'a') as f: print(str, file=f) + def record_time(self): self.cur_time = time.time() return self.cur_time + def split_time(self): split_time = time.time() - self.cur_time self.record_time() return split_time - def train(self, epoch, save_model=False): + + def train(self, epoch, save_model=False): self.model.train() self.print_log('Training epoch: {}'.format(epoch + 1)) loader = self.data_loader['train'] self.adjust_learning_rate(epoch) loss_value = [] + predict_arr = [] + proba_arr = [] + target_arr = [] + self.record_time() + timer = dict(dataloader=0.001, model=0.001, statistics=0.001) process = tqdm(loader) if epoch >= self.arg.only_train_epoch: @@ -381,8 +427,12 @@ def train(self, epoch, save_model=False): if 'DecoupleA' in key: value.requires_grad = False print(key + '-not require grad') - for batch_idx, (data, label, index) in enumerate(process): + meaning = list(self.meaning.values()) + + for batch_idx, (data, label, index, name) in enumerate(process): self.global_step += 1 + + label_tmp = label.cpu().numpy() # get data data = Variable(data.float().cuda( self.output_device), requires_grad=False) @@ -403,14 +453,22 @@ def train(self, epoch, save_model=False): else: l1 = 0 loss = self.loss(output, label) + l1 + + #for r,s in zip(name,label_tmp): + # meaning[s]= '_'.join(r.split('_')[:-1]) self.optimizer.zero_grad() loss.backward() self.optimizer.step() - loss_value.append(loss.data) + loss_value.append(loss.data.cpu().numpy()) timer['model'] += self.split_time() value, predict_label = torch.max(output.data, 1) + + predict_arr.append(predict_label.cpu().numpy()) + target_arr.append(label.data.cpu().numpy()) + proba_arr.append(output.data.cpu().numpy()) + acc = torch.mean((predict_label == label.data).float()) self.lr = self.optimizer.param_groups[0]['lr'] @@ -420,29 +478,49 @@ def train(self, epoch, save_model=False): '\tBatch({}/{}) done. 
Loss: {:.4f} lr:{:.6f}'.format( batch_idx, len(loader), loss.data, self.lr)) timer['statistics'] += self.split_time() - + + predict_arr = np.concatenate(predict_arr) + target_arr = np.concatenate(target_arr) + proba_arr = np.concatenate(proba_arr) + accuracy = torch.mean((predict_label == label.data).float()) + if accuracy >= self.best_tmp_acc: + self.best_tmp_acc = accuracy + + if epoch+1 == arg.num_epoch: + + wandb.log({"TRAIN_conf_mat" : wandb.plot.confusion_matrix( + #probs=score, + #y_true=list(label.values()), + #preds=list(predict_label.values()), + y_true=list(target_arr), + preds=list(predict_arr), + class_names=meaning, + title="TRAIN_conf_mat")}) + + if wandbFlag: + wandbF.wandbTrainLog(np.mean(loss_value), accuracy) # statistics of time consumption and loss proportion = { k: '{:02d}%'.format(int(round(v * 100 / sum(timer.values())))) for k, v in timer.items() } + - state_dict = self.model.state_dict() - weights = OrderedDict([[k.split('module.')[-1], - v.cpu()] for k, v in state_dict.items()]) - - torch.save(weights, self.arg.model_saved_name + - '-' + str(epoch) + '.pt') - - def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, result_file=None): + def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, result_file=None, isTest=False): if wrong_file is not None: f_w = open(wrong_file, 'w') if result_file is not None: f_r = open(result_file, 'w') + #if isTest: + submission = dict() + trueLabels = dict() + + meaning = list(self.meaning.values()) self.model.eval() with torch.no_grad(): self.print_log('Eval epoch: {}'.format(epoch + 1)) for ln in loader_name: + loss_value = [] score_frag = [] right_num_total = 0 @@ -451,7 +529,8 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r step = 0 process = tqdm(self.data_loader[ln]) - for batch_idx, (data, label, index) in enumerate(process): + for batch_idx, (data, label, index, name) in enumerate(process): + label_tmp = label data = Variable( data.float().cuda(self.output_device), requires_grad=False) @@ -467,11 +546,18 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r l1 = l1.mean() else: l1 = 0 + loss = self.loss(output, label) score_frag.append(output.data.cpu().numpy()) loss_value.append(loss.data.cpu().numpy()) - _, predict_label = torch.max(output.data, 1) + _, predict_label = torch.max(output.data, 1) + + #if isTest: + for j in range(output.size(0)): + submission[name[j]] = predict_label[j].item() + trueLabels[name[j]] = label_tmp[j].item() + step += 1 if wrong_file is not None or result_file is not None: @@ -490,17 +576,73 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r len(score)) accuracy = self.data_loader[ln].dataset.top_k(score, 1) + top5 = self.data_loader[ln].dataset.top_k(score, 5) + if accuracy > self.best_acc: self.best_acc = accuracy + score_dict = dict( zip(self.data_loader[ln].dataset.sample_name, score)) - with open('./work_dir/' + arg.Experiment_name + '/eval_results/best_acc' + '.pkl'.format( + conf_mat = torchmetrics.ConfusionMatrix(num_classes=self.arg.model_args["num_class"]) + confusion_matrix = conf_mat(torch.tensor(list(submission.values())).cpu(), torch.tensor(list(trueLabels.values())).cpu()) + confusion_matrix = confusion_matrix.detach().cpu().numpy() + + plt.figure(figsize = (10,7)) + + group_counts = ["{0:0.0f}".format(value) for value in confusion_matrix.flatten()] + confusion_matrix = np.asarray([line/np.sum(line) for line in confusion_matrix]) + 
confusion_matrix = np.nan_to_num(confusion_matrix) + df_cm = pd.DataFrame(confusion_matrix * 100, index = meaning, columns=meaning) + #size_arr = df_cm.sum(axis = 1) + #maxi = max(size_arr) + + group_percentages = ["{0:.1%}".format(value) for value in confusion_matrix.flatten()] + + annot = ["{1}".format(v2, v1) for v1, v2 in zip(group_counts, group_percentages)] + annot = np.asarray(annot).reshape(self.arg.model_args["num_class"], self.arg.model_args["num_class"]) + fig_ = sns.heatmap(df_cm, vmax=100, vmin=0, annot=annot, annot_kws={"size": 5}, cbar_kws={'format': '%.0f%%', 'ticks':[0, 25, 50, 75, 100]},fmt='', cmap='Blues').get_figure() + plt.ylabel('True label') + plt.xlabel('Predicted label' ) + + plt.close(fig_) + wandb.log({"Confusion matrix": wandb.Image(fig_, caption="VAL_conf_mat")}) + + with open('./work_dir/' + arg.Experiment_name + '/eval_results/'+ model_name+ '/best_acc' + '.pkl'.format( epoch, accuracy), 'wb') as f: pickle.dump(score_dict, f) + # Save the model + state_dict = self.model.state_dict() + weights = OrderedDict([[k.split('module.')[-1], + v.cpu()] for k, v in state_dict.items()]) + torch.save(weights, self.arg.model_saved_name + '-' + arg.kp_model + '-' + arg.database + "-Lr" + str(arg.base_lr) + "-NClasses" + str(arg.model_args["num_class"]) + '-' + str(accuracy) + '.pt') + + + if epoch + 1 == arg.num_epoch: + + + wandb.log({"roc" : wandb.plot.roc_curve( list(trueLabels.values()), score, \ + labels=meaning, classes_to_plot=None)}) + + wandb.log({"pr" : wandb.plot.pr_curve(list(trueLabels.values()), score, + labels=meaning, classes_to_plot=None)}) + + #wandb.log({"val_sklearn_conf_mat": wandb.sklearn.plot_confusion_matrix(, + # , meaning_3)}) + ''' + wandb.log({"VAL_conf_mat" : wandb.plot.confusion_matrix( + #probs=score, + y_true=list(trueLabels.values()), + preds=list(submission.values()), + class_names=meaning_3, + title="VAL_conf_mat")}) + ''' + print('Eval Accuracy: ', accuracy, ' model: ', self.arg.model_saved_name) + if wandbFlag: + wandbF.wandbValLog(np.mean(loss_value), accuracy, top5) score_dict = dict( zip(self.data_loader[ln].dataset.sample_name, score)) @@ -509,11 +651,50 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r for k in self.arg.show_topk: self.print_log('\tTop{}: {:.2f}%'.format( k, 100 * self.data_loader[ln].dataset.top_k(score, k))) - + ''' with open('./work_dir/' + arg.Experiment_name + '/eval_results/epoch_' + str(epoch) + '_' + str(accuracy) + '.pkl'.format( epoch, accuracy), 'wb') as f: pickle.dump(score_dict, f) + ''' + + + predLabels = [] + groundLabels = [] + print("END") + if isTest: + #print(submission) + #print(trueLabels) + totalRows = 0 + with open("submission.csv", 'w') as of: + writer = csv.writer(of) + accum = 0 + for trueName, truePred in trueLabels.items(): + + sample = trueName + #print(f'Predicting {sample}', end=' ') + #print(f'as {submission[sample]} - pred {submission[sample]} and real {row[1]}') + match=0 + predLabels.append(submission[sample]) + groundLabels.append(int(truePred)) + if int(truePred) == int(submission[sample]): + match=1 + accum+=1 + totalRows+=1 + + # identifying subject + with open("pucpSubject.csv") as subjectFile: + readerSubject = csv.reader(subjectFile) + idx = int(sample.split('_')[-1]) + subjectName = 'NA' + for name, idxStart, idxEnd in readerSubject: + if (int(idxStart) <= idx) and (idx<= int(idxEnd)): + subjectName = name + break + writer.writerow([sample, submission[sample], str(truePred), str(match), subjectName]) + return np.mean(loss_value) + + def 
start(self): if self.arg.phase == 'train': self.print_log('Parameters:\n{}\n'.format(str(vars(self.arg)))) @@ -547,7 +728,7 @@ def start(self): self.print_log('Model: {}.'.format(self.arg.model)) self.print_log('Weights: {}.'.format(self.arg.weights)) self.eval(epoch=self.arg.start_epoch, save_score=self.arg.save_score, - loader_name=['test'], wrong_file=wf, result_file=rf) + loader_name=['test'], wrong_file=wf, result_file=rf, isTest=True) self.print_log('Done.\n') @@ -571,19 +752,47 @@ def import_class(name): if __name__ == '__main__': parser = get_parser() + wandb.init(project="Connecting-points", + entity="joenatan30", + config={"num-epoch": 500, + "weight-decay": 0.0001, + "batch-size":32, + "base-lr": 0.05, + "kp-model":"wholepose", + "database":"WLASL"}) + + config = wandb.config + # load arg form config file p = parser.parse_args() if p.config is not None: with open(p.config, 'r') as f: - default_arg = yaml.load(f) + #default_arg = yaml.load(f) + default_arg = yaml.safe_load(f) key = vars(p).keys() for k in default_arg.keys(): if k not in key: print('WRONG ARG: {}'.format(k)) assert (k in key) parser.set_defaults(**default_arg) - + arg = parser.parse_args() + arg.base_lr = config["base-lr"] + arg.batch_size = config["batch-size"] + arg.weight_decay = config["weight-decay"] + arg.num_epoch = config["num-epoch"] + arg.kp_model = config["kp-model"] + arg.database = config["database"] + + arg.file_name = "./save_models/"+ arg.Experiment_name + arg.model_saved_name + '-' + arg.kp_model + '-' + arg.database + "-Lr" + str(arg.base_lr) + "-NClasses" + str(arg.model_args["num_class"]) + os.makedirs(arg.file_name,exist_ok=True) + + runAndModelName = arg.kp_model + '-' + arg.database + "-LrnRate" + str(arg.base_lr)+ "-NClases" + str(arg.model_args["num_class"]) + "-Batch" + str(arg.batch_size) + + model_name = runAndModelName + wandb.run.name = runAndModelName + wandb.run.save() + init_seed(0) processor = Processor(arg) processor.start() diff --git a/SL-GCN/model/decouple_gcn_attn.py b/SL-GCN/model/decouple_gcn_attn.py index b847d43..abf8b49 100644 --- a/SL-GCN/model/decouple_gcn_attn.py +++ b/SL-GCN/model/decouple_gcn_attn.py @@ -222,7 +222,7 @@ def forward(self, x, keep_prob): class Model(nn.Module): - def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=3): + def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=2): super(Model, self).__init__() if graph is None: diff --git a/SL-GCN/points.csv b/SL-GCN/points.csv new file mode 100644 index 0000000..4d6f637 --- /dev/null +++ b/SL-GCN/points.csv @@ -0,0 +1,72 @@ +tar_name,ori_name,mp_pos,wb_pos,op_pos,origin,tarjet +pose_nose,pose_nose,1,1,1,1,1 +pose_left_eye,pose_nose,3,2,17,1,2 +pose_right_eye,pose_nose,6,3,16,1,3 +pose_left_shoulder,pose_nose,12,6,6,1,4 +pose_right_shoulder,pose_nose,13,7,3,1,5 +pose_left_elbow,pose_left_shoulder,14,8,7,4,6 +pose_right_elbow,pose_right_shoulder,15,9,4,5,7 +pose_left_wrist,pose_left_elbow,16,10,8,6,8 +pose_right_wrist,pose_right_elbow,17,11,5,7,9 +face_right_mouth_up,pose_nose,71,74,76,1,10 +face_right_eyebrow_inner,pose_nose,89,45,47,1,11 +face_right_mouth_corner,face_right_mouth_up,91,72,74,10,12 +face_right_eyebrow_outer,face_right_eyebrow_middle,104,41,43,15,13 +face_right_mouth_down,face_right_mouth_corner,118,80,82,12,14 +face_right_eyebrow_middle,face_right_eyebrow_inner,139,43,45,11,15 +face_right_eye_outer,face_right_eyebrow_outer,164,60,62,13,16 
+face_right_jaw_up,face_right_jaw_middle,166,27,29,20,17 +face_right_eye_inner,face_right_eye_outer,167,63,65,16,18 +face_right_jaw_down,pose_nose,182,31,33,1,19 +face_right_jaw_middle,face_right_jaw_down,206,29,31,19,20 +face_left_mouth_up,pose_nose,301,76,78,1,21 +face_left_eyebrow_inner,pose_nose,319,50,48,1,22 +face_left_mouth_corner,face_left_mouth_up,321,78,80,21,23 +face_left_eyebrow_outer,face_left_eyebrow_middle,334,46,52,26,24 +face_left_mouth_down,face_left_mouth_corner,348,82,84,23,25 +face_left_eyebrow_middle,face_left_eyebrow_inner,368,48,50,22,26 +face_left_eye_outer,face_left_eyebrow_outer,393,69,71,24,27 +face_left_jaw_up,face_left_jaw_middle,395,37,39,31,28 +face_left_eye_inner,face_left_eye_outer,396,66,68,27,29 +face_left_jaw_down,pose_nose,411,33,35,1,30 +face_left_jaw_middle,face_left_jaw_down,431,35,37,30,31 +leftHand_thumb_cmc,pose_left_wrist,503,93,97,8,32 +leftHand_thumb_mcp,leftHand_thumb_cmc,504,94,98,32,33 +leftHand_thumb_ip,leftHand_thumb_mcp,505,95,99,33,34 +leftHand_thumb_tip,leftHand_thumb_ip,506,96,100,34,35 +leftHand_index_finger_mcp,pose_left_wrist,507,97,101,8,36 +leftHand_index_finger_pip,leftHand_index_finger_mcp,508,98,102,36,37 +leftHand_index_finger_dip,leftHand_index_finger_pip,509,99,103,37,38 +leftHand_index_finger_tip,leftHand_index_finger_dip,510,100,104,38,39 +leftHand_middle_finger_mcp,pose_left_wrist,511,101,105,8,40 +leftHand_middle_finger_pip,leftHand_middle_finger_mcp,512,102,106,40,41 +leftHand_middle_finger_dip,leftHand_middle_finger_pip,513,103,107,41,42 +leftHand_middle_finger_tip,leftHand_middle_finger_dip,514,104,108,42,43 +leftHand_ring_finger_mcp,pose_left_wrist,515,105,109,8,44 +leftHand_ring_finger_pip,leftHand_ring_finger_mcp,516,106,110,44,45 +leftHand_ring_finger_dip,leftHand_ring_finger_pip,517,107,111,45,46 +leftHand_ring_finger_tip,leftHand_ring_finger_dip,518,108,112,46,47 +leftHand_pinky_mcp,pose_left_wrist,519,109,113,8,48 +leftHand_pinky_pip,leftHand_pinky_mcp,520,110,114,48,49 +leftHand_pinky_dip,leftHand_pinky_pip,521,111,115,49,50 +leftHand_pinky_tip,leftHand_pinky_dip,522,112,116,50,51 +rightHand_thumb_cmc,pose_right_wrist,524,114,118,9,52 +rightHand_thumb_mcp,rightHand_thumb_cmc,525,115,119,52,53 +rightHand_thumb_ip,rightHand_thumb_mcp,526,116,120,53,54 +rightHand_thumb_tip,rightHand_thumb_ip,527,117,121,54,55 +rightHand_index_finger_mcp,pose_right_wrist,528,118,122,9,56 +rightHand_index_finger_pip,rightHand_index_finger_mcp,529,119,123,56,57 +rightHand_index_finger_dip,rightHand_index_finger_pip,530,120,124,57,58 +rightHand_index_finger_tip,rightHand_index_finger_dip,531,121,125,58,59 +rightHand_middle_finger_mcp,pose_right_wrist,532,122,126,9,60 +rightHand_middle_finger_pip,rightHand_middle_finger_mcp,533,123,127,60,61 +rightHand_middle_finger_dip,rightHand_middle_finger_pip,534,124,128,61,62 +rightHand_middle_finger_tip,rightHand_middle_finger_dip,535,125,129,62,63 +rightHand_ring_finger_mcp,pose_right_wrist,536,126,130,9,64 +rightHand_ring_finger_pip,rightHand_ring_finger_mcp,537,127,131,64,65 +rightHand_ring_finger_dip,rightHand_ring_finger_pip,538,128,132,65,66 +rightHand_ring_finger_tip,rightHand_ring_finger_dip,539,129,133,66,67 +rightHand_pinky_mcp,pose_right_wrist,540,130,134,9,68 +rightHand_pinky_pip,rightHand_pinky_mcp,541,131,135,68,69 +rightHand_pinky_dip,rightHand_pinky_pip,542,132,136,69,70 +rightHand_pinky_tip,rightHand_pinky_dip,543,133,137,70,71 diff --git a/SL-GCN/wandbFunctions.py b/SL-GCN/wandbFunctions.py new file mode 100644 index 0000000..fa93af6 --- /dev/null +++ 
b/SL-GCN/wandbFunctions.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue Jun 8 20:34:41 2021 + +@author: joe +""" +import wandb + + +def initConfigWandb(num_layers, num_classes, batch_size, + nEpoch, lrn_rate, hidden_size, dropout, + weight_decay, epsilon): + + wandb.init(project="smileLab-PSL", entity="joenatan30") + run = wandb.init() + + config = wandb.config + + config["num_layers"] = num_layers + config["num_classes"] = num_classes + config.batch_size = batch_size + config.epochs = nEpoch + config.learning_rate = lrn_rate + config["hidden_size"] = hidden_size + config.dropout = dropout + config["weight_decay"] = weight_decay + config["epsilon"] = epsilon + +def wandbTrainLog(trainLoss, TrainAcc): + wandb.log({"Train loss": trainLoss, + "Train accuracy": TrainAcc + }) + +def wandbValLog(testLoss, TestAcc, top5): + wandb.log({"Val Loss": testLoss, + "Val accuracy": TestAcc, + "Val Top5 acc": top5}) + +def watch(model): + wandb.watch(model) + +def finishWandb(): + wandb.finish() + +def sendConfusionMatrix(ground_truth, predictions, class_names, cmTrain=True): + if(cmTrain): + wandb.log({"TRAIN_conf_mat" : wandb.plot.confusion_matrix( + probs=None, + y_true=ground_truth, + preds=predictions, + class_names=class_names, + title="TRAIN_conf_mat")}) + else: + wandb.log({"TEST_conf_mat" : wandb.plot.confusion_matrix( + probs=None, + y_true=ground_truth, + preds=predictions, + class_names=class_names, + title="TEST_conf_mat")}) + \ No newline at end of file From dc815f4f4598126304686d668e1c2514aea42a55 Mon Sep 17 00:00:00 2001 From: JoeNatan30 Date: Tue, 30 Aug 2022 16:33:28 -0500 Subject: [PATCH 02/56] folders --- SL-GCN/save_models/raw | 0 SL-GCN/work_dir/raw | 0 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 SL-GCN/save_models/raw create mode 100644 SL-GCN/work_dir/raw diff --git a/SL-GCN/save_models/raw b/SL-GCN/save_models/raw new file mode 100644 index 0000000..e69de29 diff --git a/SL-GCN/work_dir/raw b/SL-GCN/work_dir/raw new file mode 100644 index 0000000..e69de29 From 31aff52e01c46d36650ebacc5582bef502f190f5 Mon Sep 17 00:00:00 2001 From: JoeNatan30 Date: Tue, 30 Aug 2022 16:34:13 -0500 Subject: [PATCH 03/56] no raw in new folders --- SL-GCN/save_models/raw | 0 SL-GCN/work_dir/raw | 0 2 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 SL-GCN/save_models/raw delete mode 100644 SL-GCN/work_dir/raw diff --git a/SL-GCN/save_models/raw b/SL-GCN/save_models/raw deleted file mode 100644 index e69de29..0000000 diff --git a/SL-GCN/work_dir/raw b/SL-GCN/work_dir/raw deleted file mode 100644 index e69de29..0000000 From 79b5540933e53e1a25869961c8693e5af3444879 Mon Sep 17 00:00:00 2001 From: JoeNatan30 Date: Tue, 30 Aug 2022 16:37:21 -0500 Subject: [PATCH 04/56] keep folders --- SL-GCN/save_models/.gitkeep | 0 SL-GCN/work_dir/.gitkeep | 0 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 SL-GCN/save_models/.gitkeep create mode 100644 SL-GCN/work_dir/.gitkeep diff --git a/SL-GCN/save_models/.gitkeep b/SL-GCN/save_models/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/SL-GCN/work_dir/.gitkeep b/SL-GCN/work_dir/.gitkeep new file mode 100644 index 0000000..e69de29 From 5b0d2a60cd8b195002fae62f37de931450f02c0a Mon Sep 17 00:00:00 2001 From: JoeNatan30 Date: Tue, 30 Aug 2022 16:50:45 -0500 Subject: [PATCH 05/56] get data from connecting points --- SL-GCN/data_gen/getConnectingPoint.py | 122 ++++++++++++++++++++++++++ SL-GCN/runModel.sh | 1 + 2 files changed, 123 insertions(+) create mode 
100644 SL-GCN/data_gen/getConnectingPoint.py create mode 100644 SL-GCN/runModel.sh diff --git a/SL-GCN/data_gen/getConnectingPoint.py b/SL-GCN/data_gen/getConnectingPoint.py new file mode 100644 index 0000000..3ebb3c0 --- /dev/null +++ b/SL-GCN/data_gen/getConnectingPoint.py @@ -0,0 +1,122 @@ +import pickle +import sys +import numpy as np +import pandas as pd +import os +import h5py +import pandas as pd +sys.path.extend(['../']) + +max_body_true = 1 +max_frame = 150 +num_channels = 2 + +def get_mp_keys(points): + tar = np.array(points.mp_pos)-1 + return list(tar) + +def get_op_keys(points): + tar = np.array(points.op_pos)-1 + return list(tar) + +def get_wp_keys(points): + tar = np.array(points.wb_pos)-1 + return list(tar) + +def read_data(path, model_key_getter): + data = [] + classes = [] + videoName = [] + + with h5py.File(path, "r") as f: + for index in f.keys(): + classes.append(f[index]['label'][...].item().decode('utf-8')) + videoName.append(f[index]['video_name'][...].item().decode('utf-8')) + data.append(f[index]["data"][...]) + + points = pd.read_csv("../points.csv") + + tar = model_key_getter(points) + + data = [d[:,:,tar] for d in data] + + meaning = {v:k for (k,v) in enumerate(sorted(set(classes)))} + + retrive_meaning = {k:v for (k,v) in enumerate(sorted(set(classes)))} + + labels = [meaning[label] for label in classes] + + return labels, videoName, data, retrive_meaning + + +def gendata(data_path, out_path, model_key_getter, part='train', config='27'): + + data=[] + sample_names = [] + + labels, sample_names, data , retrive_meaning = read_data(data_path, model_key_getter) + fp = np.zeros((len(labels), max_frame, 71, num_channels, max_body_true), dtype=np.float32) + + for i, skel in enumerate(data): + + skel = np.array(skel) + skel = np.moveaxis(skel,1,2) + skel = skel # *256 + + if skel.shape[0] < max_frame: + L = skel.shape[0] + + fp[i,:L,:,:,0] = skel + + rest = max_frame - L + num = int(np.ceil(rest / L)) + pad = np.concatenate([skel for _ in range(num)], 0)[:rest] + fp[i,L:,:,:,0] = pad + + else: + L = skel.shape[0] + + fp[i,:,:,:,0] = skel[:max_frame,:,:] + + + with open('{}/{}_label.pkl'.format(out_path, part), 'wb') as f: + pickle.dump((sample_names, labels), f) + + fp = np.transpose(fp, [0, 3, 1, 2, 4]) + print(fp.shape) + np.save('{}/{}_data_joint.npy'.format(out_path, part), fp) + + with open('{}/meaning.pkl'.format(out_path), 'wb') as f: + pickle.dump(retrive_meaning, f) + + + + +if __name__ == '__main__': + + points= '1' + out_folder='../data/sign/' + out_path = os.path.join(out_folder, points) + + kp_model = 'wholepose' # openpose wholepose mediapipe + dataset = "WLASL" # WLASL PUCP_PSL_DGI156 AEC + + model_key_getter = {'mediapipe': get_mp_keys, + 'openpose': get_op_keys, + 'wholepose': get_wp_keys} + + if not os.path.exists(out_path): + os.makedirs(out_path) + + + print('\n',kp_model,'\n') + + part = "train" + data_path = f'../../../../../ConnectingPoints/split/{dataset}--{kp_model}-v2-Train.hdf5' + gendata(data_path, out_path, model_key_getter[kp_model], part=part, config=points) + + print(out_path) + part = "val" + data_path = f'../../../../../ConnectingPoints/split/{dataset}--{kp_model}-v2-Val.hdf5' + + gendata(data_path, out_path, model_key_getter[kp_model], part=part, config=points) diff --git a/SL-GCN/runModel.sh b/SL-GCN/runModel.sh new file mode 100644 index 0000000..62393c6 --- /dev/null +++ b/SL-GCN/runModel.sh @@ -0,0 +1 @@ +python main.py --config config/sign/train/train_joint.yaml \ No newline at end of file From 
7223bbbb8be3c9c004a354f666fa908f88d476b3 Mon Sep 17 00:00:00 2001 From: JoeNatan30 Date: Tue, 30 Aug 2022 16:53:50 -0500 Subject: [PATCH 06/56] wandb functions --- SL-GCN/wandbFunctions.py | 1 - 1 file changed, 1 deletion(-) diff --git a/SL-GCN/wandbFunctions.py b/SL-GCN/wandbFunctions.py index fa93af6..e2bd19a 100644 --- a/SL-GCN/wandbFunctions.py +++ b/SL-GCN/wandbFunctions.py @@ -58,4 +58,3 @@ def sendConfusionMatrix(ground_truth, predictions, class_names, cmTrain=True): preds=predictions, class_names=class_names, title="TEST_conf_mat")}) - \ No newline at end of file From c9bed676a1b4a53c91d284f62d3097bc7e2e03bc Mon Sep 17 00:00:00 2001 From: JoeNatan30 Date: Tue, 30 Aug 2022 16:57:57 -0500 Subject: [PATCH 07/56] fix path of split in connecting points --- SL-GCN/data_gen/getConnectingPoint.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/SL-GCN/data_gen/getConnectingPoint.py b/SL-GCN/data_gen/getConnectingPoint.py index 3ebb3c0..707667a 100644 --- a/SL-GCN/data_gen/getConnectingPoint.py +++ b/SL-GCN/data_gen/getConnectingPoint.py @@ -112,11 +112,11 @@ def gendata(data_path, out_path, model_key_getter, part='train', config='27'): print('\n',kp_model,'\n') part = "train" - data_path = f'../../../../../ConnectingPoints/split/{dataset}--{kp_model}-v2-Train.hdf5' + data_path = f'../../../ConnectingPoints/split/{dataset}--{kp_model}-v2-Train.hdf5' gendata(data_path, out_path, model_key_getter[kp_model], part=part, config=points) print(out_path) part = "val" - data_path = f'../../../../../ConnectingPoints/split/{dataset}--{kp_model}-v2-Val.hdf5' + data_path = f'../../../ConnectingPoints/split/{dataset}--{kp_model}-v2-Val.hdf5' gendata(data_path, out_path, model_key_getter[kp_model], part=part, config=points) From d4c7890343a6b88c59acc1a4ed942e70855c1c3a Mon Sep 17 00:00:00 2001 From: JoeNatan30 Date: Tue, 30 Aug 2022 17:08:32 -0500 Subject: [PATCH 08/56] path connections fixes --- SL-GCN/data_gen/getConnectingPoint.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/SL-GCN/data_gen/getConnectingPoint.py b/SL-GCN/data_gen/getConnectingPoint.py index 707667a..c510d1f 100644 --- a/SL-GCN/data_gen/getConnectingPoint.py +++ b/SL-GCN/data_gen/getConnectingPoint.py @@ -98,8 +98,8 @@ def gendata(data_path, out_path, model_key_getter, part='train', config='27'): out_folder='../data/sign/' out_path = os.path.join(out_folder, points) - kp_model = 'wholepose' # openpose wholepose mediapipe - dataset = "WLASL" # WLASL PUCP_PSL_DGI156 AEC + kp_model = 'mediapipe' # openpose wholepose mediapipe + dataset = "AEC" # WLASL PUCP_PSL_DGI156 AEC model_key_getter = {'mediapipe': get_mp_keys, 'openpose': get_op_keys, @@ -112,11 +112,11 @@ def gendata(data_path, out_path, model_key_getter, part='train', config='27'): print('\n',kp_model,'\n') part = "train" - data_path = f'../../../ConnectingPoints/split/{dataset}--{kp_model}-v2-Train.hdf5' + data_path = f'../../../ConnectingPoints/split/{dataset}--{kp_model}-Train.hdf5' gendata(data_path, out_path, model_key_getter[kp_model], part=part, config=points) print(out_path) part = "val" - data_path = f'../../../ConnectingPoints/split/{dataset}--{kp_model}-v2-Val.hdf5' + data_path = f'../../../ConnectingPoints/split/{dataset}--{kp_model}-Val.hdf5' gendata(data_path, out_path, model_key_getter[kp_model], part=part, config=points) From 6c5ceaa5409ad7a6bd327afdc2740eef9893b3d9 Mon Sep 17 00:00:00 2001 From: JoeNatan30 Date: Tue, 30 Aug 2022 17:54:15 -0500 Subject: [PATCH 09/56] to run different models for 71 points --- 
SL-GCN/config/sign/train/train_joint.yaml | 4 ++-- SL-GCN/data_gen/getConnectingPoint.py | 15 ++++++++------- SL-GCN/graph/sign_27.py | 5 +++-- SL-GCN/main.py | 5 +++-- SL-GCN/{points.csv => points_71.csv} | 0 5 files changed, 16 insertions(+), 13 deletions(-) rename SL-GCN/{points.csv => points_71.csv} (100%) diff --git a/SL-GCN/config/sign/train/train_joint.yaml b/SL-GCN/config/sign/train/train_joint.yaml index c8bdefd..a6dbeb0 100644 --- a/SL-GCN/config/sign/train/train_joint.yaml +++ b/SL-GCN/config/sign/train/train_joint.yaml @@ -26,7 +26,7 @@ test_feeder_args: # 226 (num classes) model: model.decouple_gcn_attn.Model model_args: - num_class: 101 #53 110 # AEC=28, PUCP=36 , WLASL=101 + num_class: 28 #53 110 # AEC=28, PUCP=36 , WLASL=101 num_point: 71 num_person: 1 graph: graph.sign_27.Graph @@ -41,7 +41,7 @@ base_lr: 0.1 step: [150, 200] # training -device: [1] +device: [0, 1] keep_rate: 0.9 only_train_epoch: 1 batch_size: 64 diff --git a/SL-GCN/data_gen/getConnectingPoint.py b/SL-GCN/data_gen/getConnectingPoint.py index c510d1f..3c03632 100644 --- a/SL-GCN/data_gen/getConnectingPoint.py +++ b/SL-GCN/data_gen/getConnectingPoint.py @@ -23,7 +23,7 @@ def get_wp_keys(points): tar = np.array(points.wb_pos)-1 return list(tar) -def read_data(path, model_key_getter): +def read_data(path, model_key_getter, config): data = [] classes = [] videoName = [] @@ -34,7 +34,7 @@ def read_data(path, model_key_getter): videoName.append(f[index]['video_name'][...].item().decode('utf-8')) data.append(f[index]["data"][...]) - points = pd.read_csv("../points.csv") + points = pd.read_csv(f"../points_{config}.csv") tar = model_key_getter(points) @@ -43,7 +43,7 @@ def read_data(path, model_key_getter): meaning = {v:k for (k,v) in enumerate(sorted(set(classes)))} retrive_meaning = {k:v for (k,v) in enumerate(sorted(set(classes)))} - + print(retrive_meaning) labels = [meaning[label] for label in classes] return labels, videoName, data, retrive_meaning @@ -54,7 +54,7 @@ def gendata(data_path, out_path, model_key_getter, part='train', config='27'): data=[] sample_names = [] - labels, sample_names, data , retrive_meaning = read_data(data_path, model_key_getter) + labels, sample_names, data , retrive_meaning = read_data(data_path, model_key_getter,config) fp = np.zeros((len(labels), max_frame, 71, num_channels, max_body_true), dtype=np.float32) for i, skel in enumerate(data): @@ -94,12 +94,13 @@ def gendata(data_path, out_path, model_key_getter, part='train', config='27'): if __name__ == '__main__': - points= '1' + points= '1' # just used to create folder "1" in data/sign/1/ out_folder='../data/sign/' out_path = os.path.join(out_folder, points) kp_model = 'mediapipe' # openpose wholepose mediapipe dataset = "AEC" # WLASL PUCP_PSL_DGI156 AEC + numPoints = 71 # number of points used, need to be: 27 or 71 model_key_getter = {'mediapipe': get_mp_keys, 'openpose': get_op_keys, @@ -113,10 +114,10 @@ def gendata(data_path, out_path, model_key_getter, part='train', config='27'): part = "train" data_path = f'../../../ConnectingPoints/split/{dataset}--{kp_model}-Train.hdf5' - gendata(data_path, out_path, model_key_getter[kp_model], part=part, config=points) + gendata(data_path, out_path, model_key_getter[kp_model], part=part, config=numPoints) print(out_path) part = "val" data_path = f'../../../ConnectingPoints/split/{dataset}--{kp_model}-Val.hdf5' - gendata(data_path, out_path, model_key_getter[kp_model], part=part, config=points) + gendata(data_path, out_path, model_key_getter[kp_model], part=part, config=numPoints) diff 
--git a/SL-GCN/graph/sign_27.py b/SL-GCN/graph/sign_27.py index 57e3da3..c906e8e 100644 --- a/SL-GCN/graph/sign_27.py +++ b/SL-GCN/graph/sign_27.py @@ -4,13 +4,14 @@ from graph import tools import pandas as pd -points = pd.read_csv("points.csv") +num_node = 71 +points = pd.read_csv(f"points_{num_node}.csv") ori = points.origin tar = points.tarjet inward_ori_index = [(o,t) for o, t in zip(ori, tar)] -num_node = 71 + print("NUM OF NODES:", num_node) self_link = [(i, i) for i in range(num_node)] diff --git a/SL-GCN/main.py b/SL-GCN/main.py index 9da6f60..8625241 100644 --- a/SL-GCN/main.py +++ b/SL-GCN/main.py @@ -593,6 +593,7 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r group_counts = ["{0:0.0f}".format(value) for value in confusion_matrix.flatten()] confusion_matrix = np.asarray([line/np.sum(line) for line in confusion_matrix]) confusion_matrix = np.nan_to_num(confusion_matrix) + df_cm = pd.DataFrame(confusion_matrix * 100, index = meaning, columns=meaning) #size_arr = df_cm.sum(axis = 1) #maxi = max(size_arr) @@ -758,8 +759,8 @@ def import_class(name): "weight-decay": 0.0001, "batch-size":32, "base-lr": 0.05, - "kp-model":"wholepose", - "database":"WLASL"}) + "kp-model":"mediapipe", + "database":"AEC"}) config = wandb.config diff --git a/SL-GCN/points.csv b/SL-GCN/points_71.csv similarity index 100% rename from SL-GCN/points.csv rename to SL-GCN/points_71.csv From 9505bf678025e40deae2180002a9e7cf53ad3402 Mon Sep 17 00:00:00 2001 From: JoeNatan30 Date: Wed, 31 Aug 2022 13:11:04 -0500 Subject: [PATCH 10/56] work with 29 points --- SL-GCN/config/sign/train/train_joint.yaml | 4 +- SL-GCN/data_gen/getConnectingPoint.py | 17 ++-- SL-GCN/feeders/feeder.py | 14 ++- SL-GCN/graph/sign_27.py | 2 +- SL-GCN/main.py | 103 +++++++++++++--------- SL-GCN/points_29.csv | 30 +++++++ SL-GCN/readme.md | 18 ++++ SL-GCN/wandb_sweep.yaml | 27 ++++++ 8 files changed, 154 insertions(+), 61 deletions(-) create mode 100644 SL-GCN/points_29.csv create mode 100644 SL-GCN/wandb_sweep.yaml diff --git a/SL-GCN/config/sign/train/train_joint.yaml b/SL-GCN/config/sign/train/train_joint.yaml index a6dbeb0..231cd37 100644 --- a/SL-GCN/config/sign/train/train_joint.yaml +++ b/SL-GCN/config/sign/train/train_joint.yaml @@ -26,8 +26,8 @@ test_feeder_args: # 226 (num classes) model: model.decouple_gcn_attn.Model model_args: - num_class: 28 #53 110 # AEC=28, PUCP=36 , WLASL=101 - num_point: 71 + num_class: 28 # AEC=28, PUCP=36 , WLASL=101 + num_point: 29 # 29 or 71 num_person: 1 graph: graph.sign_27.Graph groups: 16 diff --git a/SL-GCN/data_gen/getConnectingPoint.py b/SL-GCN/data_gen/getConnectingPoint.py index 3c03632..13b65cd 100644 --- a/SL-GCN/data_gen/getConnectingPoint.py +++ b/SL-GCN/data_gen/getConnectingPoint.py @@ -11,6 +11,7 @@ max_frame = 150 num_channels = 2 +# These three def return an index value less 1 because it array count starts at 1 def get_mp_keys(points): tar = np.array(points.mp_pos)-1 return list(tar) @@ -43,19 +44,19 @@ def read_data(path, model_key_getter, config): meaning = {v:k for (k,v) in enumerate(sorted(set(classes)))} retrive_meaning = {k:v for (k,v) in enumerate(sorted(set(classes)))} - print(retrive_meaning) + labels = [meaning[label] for label in classes] return labels, videoName, data, retrive_meaning -def gendata(data_path, out_path, model_key_getter, part='train', config='27'): +def gendata(data_path, out_path, model_key_getter, part='train', config=1): data=[] sample_names = [] labels, sample_names, data , retrive_meaning = read_data(data_path, 
model_key_getter,config) - fp = np.zeros((len(labels), max_frame, 71, num_channels, max_body_true), dtype=np.float32) + fp = np.zeros((len(labels), max_frame, config, num_channels, max_body_true), dtype=np.float32) for i, skel in enumerate(data): @@ -94,13 +95,13 @@ def gendata(data_path, out_path, model_key_getter, part='train', config='27'): if __name__ == '__main__': - points= '1' # just used to create folder "1" in data/sign/1/ + folderName= '1' # just used to create folder "1" in data/sign/1/ out_folder='../data/sign/' - out_path = os.path.join(out_folder, points) + out_path = os.path.join(out_folder, folderName) kp_model = 'mediapipe' # openpose wholepose mediapipe dataset = "AEC" # WLASL PUCP_PSL_DGI156 AEC - numPoints = 71 # number of points used, need to be: 27 or 71 + numPoints = 29 # number of points used, need to be: 29 or 71 model_key_getter = {'mediapipe': get_mp_keys, 'openpose': get_op_keys, @@ -113,11 +114,13 @@ def gendata(data_path, out_path, model_key_getter, part='train', config='27'): print('\n',kp_model,'\n') part = "train" + print(out_path,'->', part) data_path = f'../../../ConnectingPoints/split/{dataset}--{kp_model}-Train.hdf5' gendata(data_path, out_path, model_key_getter[kp_model], part=part, config=numPoints) - print(out_path) + part = "val" + print(out_path,'->', part) data_path = f'../../../ConnectingPoints/split/{dataset}--{kp_model}-Val.hdf5' gendata(data_path, out_path, model_key_getter[kp_model], part=part, config=numPoints) diff --git a/SL-GCN/feeders/feeder.py b/SL-GCN/feeders/feeder.py index c2715b7..6323253 100644 --- a/SL-GCN/feeders/feeder.py +++ b/SL-GCN/feeders/feeder.py @@ -7,18 +7,16 @@ sys.path.extend(['../']) from feeders import tools - -# 71 points -flip_index = np.concatenate(([0,2,1,4,3,6,5,8,7,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30],[31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],[51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70]), axis=0) - -#flip_index = np.concatenate(([0,2,1,4,3,6,5,8,7,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30],[31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],[51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70]), axis=0) +# flip_index for 71 and 29 +flip_index = {71:np.concatenate(([0,2,1,4,3,6,5,8,7,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30],[31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],[51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70]), axis=0), + 29:np.concatenate(([0,2,1,4,3,6,5,8,7],[9,10,11,12,13,14,15,16,17,18],[19,20,21,22,23,24,25,26,27,28]), axis=0)} class Feeder(Dataset): def __init__(self, data_path, label_path, meaning_path, random_choose=False, random_shift=False, random_move=False, window_size=-1, normalization=False, debug=False, use_mmap=True, random_mirror=False, random_mirror_p=0.5, is_vector=False): - """ + """ :param data_path: :param label_path: :param random_choose: If true, randomly choose a portion of the input sequence @@ -100,8 +98,8 @@ def __getitem__(self, index): if self.random_mirror: if random.random() > self.random_mirror_p: #print("dabe before random mirror", data_numpy) - assert data_numpy.shape[2] == 71 - data_numpy = data_numpy[:,:,flip_index,:] + assert data_numpy.shape[2] == 71 or data_numpy.shape[2] == 29 + data_numpy = data_numpy[:,:,flip_index[data_numpy.shape[2]],:] if self.is_vector: data_numpy[0,:,:,:] = - data_numpy[0,:,:,:] else: diff --git a/SL-GCN/graph/sign_27.py b/SL-GCN/graph/sign_27.py index c906e8e..16849d6 100644 --- 
a/SL-GCN/graph/sign_27.py +++ b/SL-GCN/graph/sign_27.py @@ -4,7 +4,7 @@ from graph import tools import pandas as pd -num_node = 71 +num_node = 29 # 29 or 71 points = pd.read_csv(f"points_{num_node}.csv") ori = points.origin tar = points.tarjet diff --git a/SL-GCN/main.py b/SL-GCN/main.py index 8625241..a96bd3c 100644 --- a/SL-GCN/main.py +++ b/SL-GCN/main.py @@ -28,6 +28,9 @@ import torch.nn.functional as F import wandbFunctions as wandbF import wandb + +wandbFlag = True + # class LabelSmoothingCrossEntropy(nn.Module): # def __init__(self): # super(LabelSmoothingCrossEntropy, self).__init__() @@ -40,7 +43,6 @@ # loss = confidence * nll_loss + smoothing * smooth_loss # return loss.mean() -wandbFlag = True model_name = '' @@ -315,14 +317,15 @@ def load_optimizer(self): params += [{'params': value, 'lr': self.arg.base_lr, 'lr_mult': lr_mult, 'decay_mult': decay_mult, 'weight_decay': weight_decay}] - wandb.config = { - "learning_rate": self.arg.base_lr, - "epochs": self.arg.num_epoch, - "batch_size": self.arg.batch_size, - "weight_decay":self.arg.weight_decay, - "num_class":self.arg.model_args["num_class"], - "momentum":0.9 - } + if wandbFlag: + wandb.config = { + "learning_rate": self.arg.base_lr, + "epochs": self.arg.num_epoch, + "batch_size": self.arg.batch_size, + "weight_decay":self.arg.weight_decay, + "num_class":self.arg.model_args["num_class"], + "momentum":0.9 + } self.optimizer = optim.SGD( params, momentum=0.9, @@ -332,13 +335,15 @@ def load_optimizer(self): self.model.parameters(), lr=self.arg.base_lr, weight_decay=self.arg.weight_decay) - wandb.config = { - "learning_rate": self.arg.base_lr, - "epochs": self.arg.num_epoch, - "batch_size": self.arg.batch_size, - "weight_decay":self.arg.weight_decay, - "num_class":self.arg.model_args["num_class"] - } + + if wandbFlag: + wandb.config = { + "learning_rate": self.arg.base_lr, + "epochs": self.arg.num_epoch, + "batch_size": self.arg.batch_size, + "weight_decay":self.arg.weight_decay, + "num_class":self.arg.model_args["num_class"] + } else: raise ValueError() @@ -487,15 +492,15 @@ def train(self, epoch, save_model=False): self.best_tmp_acc = accuracy if epoch+1 == arg.num_epoch: - - wandb.log({"TRAIN_conf_mat" : wandb.plot.confusion_matrix( - #probs=score, - #y_true=list(label.values()), - #preds=list(predict_label.values()), - y_true=list(target_arr), - preds=list(predict_arr), - class_names=meaning, - title="TRAIN_conf_mat")}) + if wandbFlag: + wandb.log({"TRAIN_conf_mat" : wandb.plot.confusion_matrix( + #probs=score, + #y_true=list(label.values()), + #preds=list(predict_label.values()), + y_true=list(target_arr), + preds=list(predict_arr), + class_names=meaning, + title="TRAIN_conf_mat")}) if wandbFlag: wandbF.wandbTrainLog(np.mean(loss_value), accuracy) @@ -607,7 +612,9 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r plt.xlabel('Predicted label' ) plt.close(fig_) - wandb.log({"Confusion matrix": wandb.Image(fig_, caption="VAL_conf_mat")}) + + if wandbFlag: + wandb.log({"Confusion matrix": wandb.Image(fig_, caption="VAL_conf_mat")}) with open('./work_dir/' + arg.Experiment_name + '/eval_results/'+ model_name+ '/best_acc' + '.pkl'.format( epoch, accuracy), 'wb') as f: @@ -622,12 +629,12 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r if epoch + 1 == arg.num_epoch: - - wandb.log({"roc" : wandb.plot.roc_curve( list(trueLabels.values()), score, \ - labels=meaning, classes_to_plot=None)}) - - wandb.log({"pr" : wandb.plot.pr_curve(list(trueLabels.values()), score, - 
labels=meaning, classes_to_plot=None)}) + if wandbFlag: + wandb.log({"roc" : wandb.plot.roc_curve( list(trueLabels.values()), score, \ + labels=meaning, classes_to_plot=None)}) + + wandb.log({"pr" : wandb.plot.pr_curve(list(trueLabels.values()), score, + labels=meaning, classes_to_plot=None)}) #wandb.log({"val_sklearn_conf_mat": wandb.sklearn.plot_confusion_matrix(, # , meaning_3)}) @@ -753,16 +760,25 @@ def import_class(name): if __name__ == '__main__': parser = get_parser() - wandb.init(project="Connecting-points", - entity="joenatan30", - config={"num-epoch": 500, - "weight-decay": 0.0001, - "batch-size":32, - "base-lr": 0.05, - "kp-model":"mediapipe", - "database":"AEC"}) + config = { + # + "num-epoch": 500, + "weight-decay": 0.0001, + "batch-size":32, + "base-lr": 0.05, + "kp-model":"mediapipe", + "database":"AEC", + + # This parameter is only used for wandb reports - not for the model + "num_points": 29 + } + + if wandbFlag: + wandb.init(project="Connecting-points", + entity="joenatan30", + config=config) - config = wandb.config + config = wandb.config # load arg form config file p = parser.parse_args() @@ -791,8 +807,9 @@ def import_class(name): runAndModelName = arg.kp_model + '-' + arg.database + "-LrnRate" + str(arg.base_lr)+ "-NClases" + str(arg.model_args["num_class"]) + "-Batch" + str(arg.batch_size) model_name = runAndModelName - wandb.run.name = runAndModelName - wandb.run.save() + if wandbFlag: + wandb.run.name = runAndModelName + wandb.run.save() init_seed(0) processor = Processor(arg) diff --git a/SL-GCN/points_29.csv b/SL-GCN/points_29.csv new file mode 100644 index 0000000..054a3bf --- /dev/null +++ b/SL-GCN/points_29.csv @@ -0,0 +1,30 @@ +tar_name,ori_name,mp_pos,wb_pos,op_pos,origin,tarjet +pose_nose,pose_nose,1,1,1,1,1 +pose_left_eye,pose_nose,3,2,17,1,2 +pose_right_eye,pose_nose,6,3,16,1,3 +pose_left_shoulder,pose_nose,12,6,6,1,4 +pose_right_shoulder,pose_nose,13,7,3,1,5 +pose_left_elbow,pose_left_shoulder,14,8,7,4,6 +pose_right_elbow,pose_right_shoulder,15,9,4,5,7 +pose_left_wrist,pose_left_elbow,16,10,8,6,8 +pose_right_wrist,pose_right_elbow,17,11,5,7,9 +leftHand_thumb_mcp,pose_left_wrist,504,94,98,8,10 +leftHand_thumb_tip,leftHand_thumb_mcp,506,96,100,10,11 +leftHand_index_finger_mcp,pose_left_wrist,507,97,101,8,12 +leftHand_index_finger_tip,leftHand_index_finger_mcp,510,100,104,12,13 +leftHand_middle_finger_mcp,pose_left_wrist,511,101,105,8,14 +leftHand_middle_finger_tip,leftHand_middle_finger_mcp,514,104,108,14,15 +leftHand_ring_finger_mcp,pose_left_wrist,515,105,109,8,16 +leftHand_ring_finger_tip,leftHand_ring_finger_mcp,518,108,112,16,17 +leftHand_pinky_mcp,pose_left_wrist,519,109,113,8,18 +leftHand_pinky_tip,leftHand_pinky_mcp,522,112,116,18,19 +rightHand_thumb_mcp,pose_right_wrist,525,115,119,9,20 +rightHand_thumb_tip,rightHand_thumb_mcp,527,117,121,20,21 +rightHand_index_finger_mcp,pose_right_wrist,528,118,122,9,22 +rightHand_index_finger_tip,rightHand_index_finger_mcp,531,121,125,22,23 +rightHand_middle_finger_mcp,pose_right_wrist,532,122,126,9,24 +rightHand_middle_finger_tip,rightHand_middle_finger_mcp,535,125,129,24,25 +rightHand_ring_finger_mcp,pose_right_wrist,536,126,130,9,26 +rightHand_ring_finger_tip,rightHand_ring_finger_mcp,539,129,133,26,27 +rightHand_pinky_mcp,pose_right_wrist,540,130,134,9,28 +rightHand_pinky_tip,rightHand_pinky_mcp,543,133,137,28,29 diff --git a/SL-GCN/readme.md b/SL-GCN/readme.md index 61e7375..205f029 100644 --- a/SL-GCN/readme.md +++ b/SL-GCN/readme.md @@ -1,4 +1,22 @@ # Skeleton Based Sign Language Recognition + +## To 
use with Connecting Points + +### Generate smile-lab data split (from the split of connecting points) and Smile-lab model variable preparation to train +1. Run "getConnectingPoint.py" in data_gen folder (Not forget to modify "kpModel", "numPoints" and "dataset" variable) + +2. Modify "num_point", "num_class" and "device" variable of the yaml file "/config/sign/train/train_joint.yaml" as it is needed (same as setted in the previous step) + +3. Modify "num_node" variable in sign_27 + +4. Go to "if __name__ == '__main__':" section of main.py (in SL-GCN folder) and modify "config" paremeters + +5. run +``` +python main.py --config config/sign/train/train_joint.yaml +``` + +--------------------------- ## Data preparation 1. Extract whole-body keypoints data following the instruction in ../data_process/wholepose 2. Run the following code to prepare the data for GCN. diff --git a/SL-GCN/wandb_sweep.yaml b/SL-GCN/wandb_sweep.yaml new file mode 100644 index 0000000..0d536e3 --- /dev/null +++ b/SL-GCN/wandb_sweep.yaml @@ -0,0 +1,27 @@ +command: + - ${env} + - python + - ${program} + - --config + - config/sign/train/train_joint.yaml + - ${args} +method: grid +name: MergeData-Seed-42 +parameters: + base-lr: + values: + - 7 + - 1.7 + - 0.7 + - 0.07 + batch-size: + values: + - 32 + - 16 + num-epoch: + values: + - 250 + weight-decay: + values: + - 1e-05 +program: main.py \ No newline at end of file From 4edaffd67d4ca769642acc0f0f76c7512a671ccd Mon Sep 17 00:00:00 2001 From: JoeNatan30 Date: Wed, 31 Aug 2022 13:15:13 -0500 Subject: [PATCH 11/56] Connecting points respository --- SL-GCN/readme.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SL-GCN/readme.md b/SL-GCN/readme.md index 205f029..5a356d2 100644 --- a/SL-GCN/readme.md +++ b/SL-GCN/readme.md @@ -1,6 +1,6 @@ # Skeleton Based Sign Language Recognition -## To use with Connecting Points +## To use with [Connecting Points](https://github.com/JoeNatan30/ConnectingPoints) repository ### Generate smile-lab data split (from the split of connecting points) and Smile-lab model variable preparation to train 1. Run "getConnectingPoint.py" in data_gen folder (Not forget to modify "kpModel", "numPoints" and "dataset" variable) From b260e97e968fbeebd9c2732f4ab0f636c5b86367 Mon Sep 17 00:00:00 2001 From: JoeNatan30 Date: Wed, 31 Aug 2022 13:28:24 -0500 Subject: [PATCH 12/56] note in readme --- SL-GCN/readme.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/SL-GCN/readme.md b/SL-GCN/readme.md index 5a356d2..6d045a7 100644 --- a/SL-GCN/readme.md +++ b/SL-GCN/readme.md @@ -16,6 +16,8 @@ python main.py --config config/sign/train/train_joint.yaml ``` +Note: if you don't have a wandb account, you need to set "wandbFlag" variable of "main.py" to False and modify the code to have reports + --------------------------- ## Data preparation 1. 
Extract whole-body keypoints data following the instruction in ../data_process/wholepose From f73a75ca5225334e3a9b3868dcaec9db27ecac00 Mon Sep 17 00:00:00 2001 From: JoeNatan30 Date: Mon, 5 Sep 2022 21:08:34 -0500 Subject: [PATCH 13/56] file output fixes and readme --- SL-GCN/data_gen/getConnectingPoint.py | 6 ++-- SL-GCN/main.py | 7 ++-- SL-GCN/readme.md | 52 --------------------------- 3 files changed, 8 insertions(+), 57 deletions(-) diff --git a/SL-GCN/data_gen/getConnectingPoint.py b/SL-GCN/data_gen/getConnectingPoint.py index 13b65cd..20b7beb 100644 --- a/SL-GCN/data_gen/getConnectingPoint.py +++ b/SL-GCN/data_gen/getConnectingPoint.py @@ -99,8 +99,8 @@ def gendata(data_path, out_path, model_key_getter, part='train', config=1): out_folder='../data/sign/' out_path = os.path.join(out_folder, folderName) - kp_model = 'mediapipe' # openpose wholepose mediapipe - dataset = "AEC" # WLASL PUCP_PSL_DGI156 AEC + kp_model = 'wholepose' # openpose wholepose mediapipe + dataset = "WLASL" # WLASL PUCP_PSL_DGI156 AEC numPoints = 29 # number of points used, need to be: 29 or 71 model_key_getter = {'mediapipe': get_mp_keys, @@ -111,7 +111,7 @@ def gendata(data_path, out_path, model_key_getter, part='train', config=1): os.makedirs(out_path) - print('\n',kp_model,'\n') + print('\n',kp_model, dataset,'\n') part = "train" print(out_path,'->', part) diff --git a/SL-GCN/main.py b/SL-GCN/main.py index a96bd3c..f010751 100644 --- a/SL-GCN/main.py +++ b/SL-GCN/main.py @@ -432,9 +432,11 @@ def train(self, epoch, save_model=False): if 'DecoupleA' in key: value.requires_grad = False print(key + '-not require grad') + meaning = list(self.meaning.values()) for batch_idx, (data, label, index, name) in enumerate(process): + self.global_step += 1 label_tmp = label.cpu().numpy() @@ -450,6 +452,7 @@ def train(self, epoch, save_model=False): keep_prob = -(1 - self.arg.keep_rate) / 100 * epoch + 1.0 else: keep_prob = self.arg.keep_rate + output = self.model(data, keep_prob) if isinstance(output, tuple): @@ -770,7 +773,7 @@ def import_class(name): "database":"AEC", # This parameter is only used for wandb reports - not for the model - "num_points": 29 + "num_points": 29 } if wandbFlag: @@ -801,7 +804,7 @@ def import_class(name): arg.kp_model = config["kp-model"] arg.database = config["database"] - arg.file_name = "./save_models/"+ arg.Experiment_name + arg.model_saved_name + '-' + arg.kp_model + '-' + arg.database + "-Lr" + str(arg.base_lr) + "-NClasses" + str(arg.model_args["num_class"]) + arg.file_name = f"./save_models/{arg.Experiment_name}{arg.model_saved_name}-{arg.kp_model}-{arg.database}-Lr{str(arg.base_lr)}-NClasses{str(arg.model_args['num_class'])}-{str(config['num_points'])}" os.makedirs(arg.file_name,exist_ok=True) runAndModelName = arg.kp_model + '-' + arg.database + "-LrnRate" + str(arg.base_lr)+ "-NClases" + str(arg.model_args["num_class"]) + "-Batch" + str(arg.batch_size) diff --git a/SL-GCN/readme.md b/SL-GCN/readme.md index 6d045a7..cccaddd 100644 --- a/SL-GCN/readme.md +++ b/SL-GCN/readme.md @@ -19,55 +19,3 @@ python main.py --config config/sign/train/train_joint.yaml Note: if you don't have a wandb account, you need to set "wandbFlag" variable of "main.py" to False and modify the code to have reports --------------------------- -## Data preparation -1. Extract whole-body keypoints data following the instruction in ../data_process/wholepose -2. Run the following code to prepare the data for GCN. 
- - cd data_gen/ - python sign_gendata.py - python gen_bone_data.py - python gen_motion.py -## Usage -### Train: -``` -python main.py --config config/sign/train/train_joint.yaml - -python main.py --config config/sign/train/train_bone.yaml - -python main.py --config config/sign/train/train_joint_motion.yaml - -python main.py --config config/sign/train/train_bone_motion.yaml -``` -### Finetune: -``` -python main.py --config config/sign/finetune/train_joint.yaml - -python main.py --config config/sign/finetune/train_bone.yaml - -python main.py --config config/sign/finetune/train_joint_motion.yaml - -python main.py --config config/sign/finetune/train_bone_motion.yaml -``` -### Test: -``` -python main.py --config config/sign/test/test_joint.yaml - -python main.py --config config/sign/test/test_bone.yaml - -python main.py --config config/sign/test/test_joint_motion.yaml - -python main.py --config config/sign/test/test_bone_motion.yaml -``` -### Test Finetuned: -``` -python main.py --config config/sign/test_finetuned/test_joint.yaml - -python main.py --config config/sign/test_finetuned/test_bone.yaml - -python main.py --config config/sign/test_finetuned/test_joint_motion.yaml - -python main.py --config config/sign/test_finetuned/test_bone_motion.yaml -``` -### Multi-stream ensemble: -1. Copy the results .pkl files from all streams (joint, bone, joint motion and bone motion) to ../ensemble/gcn and renamed them correctly. -2. Follow the instruction in ../ensemble/gcn to obtained the results of multi-stream ensemble. \ No newline at end of file From a6038b80a62a992492a489b147f7d423d8dea202 Mon Sep 17 00:00:00 2001 From: Chameleon Cloud User Date: Wed, 7 Sep 2022 03:25:22 +0000 Subject: [PATCH 14/56] automatizacion v1 estable --- SL-GCN/config/sign/train/train_joint.yaml | 19 +- SL-GCN/data/sign/27_2/gen_train_val.py | 14 - SL-GCN/data_gen/getConnectingPoint.py | 29 +- SL-GCN/graph/sign_27.py | 39 ++- SL-GCN/main.py | 406 +++++++++++++--------- SL-GCN/model/decouple_gcn_attn.py | 1 + SL-GCN/readme.md | 2 +- SL-GCN/runModel.sh | 56 ++- SL-GCN/save_models/.gitkeep | 0 SL-GCN/wandbFunctions.py | 5 +- SL-GCN/work_dir/.gitkeep | 0 11 files changed, 352 insertions(+), 219 deletions(-) delete mode 100644 SL-GCN/data/sign/27_2/gen_train_val.py delete mode 100644 SL-GCN/save_models/.gitkeep delete mode 100644 SL-GCN/work_dir/.gitkeep diff --git a/SL-GCN/config/sign/train/train_joint.yaml b/SL-GCN/config/sign/train/train_joint.yaml index 231cd37..1aec012 100644 --- a/SL-GCN/config/sign/train/train_joint.yaml +++ b/SL-GCN/config/sign/train/train_joint.yaml @@ -1,11 +1,11 @@ -Experiment_name: sign_joint_final +#Experiment_name: sign_joint_final # feeder feeder: feeders.feeder.Feeder train_feeder_args: - data_path: ./data/sign/1/train_data_joint.npy - label_path: ./data/sign/1/train_label.pkl - meaning_path: ./data/sign/1/meaning.pkl + data_path: data/sign/1/train_data_joint.npy + label_path: data/sign/1/train_label.pkl + meaning_path: data/sign/1/meaning.pkl debug: False random_choose: True window_size: 100 @@ -16,9 +16,9 @@ train_feeder_args: is_vector: False test_feeder_args: - data_path: ./data/sign/1/val_data_joint.npy - label_path: ./data/sign/1/val_label.pkl - meaning_path: ./data/sign/1/meaning.pkl + data_path: data/sign/1/val_data_joint.npy + label_path: data/sign/1/val_label.pkl + meaning_path: data/sign/1/meaning.pkl random_mirror: False normalization: True @@ -26,14 +26,15 @@ test_feeder_args: # 226 (num classes) model: model.decouple_gcn_attn.Model model_args: - num_class: 28 # AEC=28, PUCP=36 , 
WLASL=101 - num_point: 29 # 29 or 71 + #num_class: 28 # AEC=28, PUCP=36 , WLASL=101 + #num_point: 29 # 29 or 71 num_person: 1 graph: graph.sign_27.Graph groups: 16 block_size: 41 graph_args: labeling_mode: 'spatial' + num_node: 29 #optim weight_decay: 0.0001 diff --git a/SL-GCN/data/sign/27_2/gen_train_val.py b/SL-GCN/data/sign/27_2/gen_train_val.py deleted file mode 100644 index 816e693..0000000 --- a/SL-GCN/data/sign/27_2/gen_train_val.py +++ /dev/null @@ -1,14 +0,0 @@ -import numpy as np - -parts = {'joint', 'bone2', 'joint_motion', 'bone2_motion'} - -for part in parts: - print(part) - data_train = np.load('train_data_{}.npy'.format(part)) - data_val = np.load('val_data_{}.npy'.format(part)) - - data_train_val = np.concatenate((data_train, data_val), axis=0) - print(data_train_val.shape) - - - np.save('train_val_data_{}.npy'.format(part), data_train_val) \ No newline at end of file diff --git a/SL-GCN/data_gen/getConnectingPoint.py b/SL-GCN/data_gen/getConnectingPoint.py index 20b7beb..cd37a7b 100644 --- a/SL-GCN/data_gen/getConnectingPoint.py +++ b/SL-GCN/data_gen/getConnectingPoint.py @@ -29,15 +29,33 @@ def read_data(path, model_key_getter, config): classes = [] videoName = [] + if 'AEC' in path: + list_labels_banned = ["ya", "qué?", "qué", "bien", "dos", "ahí", "luego", "yo", "él", "tú","???","NNN"] + + if 'PUCP' in path: + list_labels_banned = ["ya", "qué?", "qué", "bien", "dos", "ahí", "luego", "yo", "él", "tú","???","NNN"] + list_labels_banned += ["sí","ella","uno","ese","ah","dijo","llamar"] + + if 'WLASL' in path: + list_labels_banned = ['apple','computer','fish','kiss','later','no','orange','pizza','purple','secretary','shirt','sunday','take','water','yellow'] + + with h5py.File(path, "r") as f: for index in f.keys(): - classes.append(f[index]['label'][...].item().decode('utf-8')) + label = f[index]['label'][...].item().decode('utf-8') + + if str(label) in list_labels_banned: + continue + + classes.append(label) videoName.append(f[index]['video_name'][...].item().decode('utf-8')) data.append(f[index]["data"][...]) - - points = pd.read_csv(f"../points_{config}.csv") + + print('config : ',config) + points = pd.read_csv(f"points_{config}.csv") tar = model_key_getter(points) + print('tart',tar) data = [d[:,:,tar] for d in data] @@ -47,6 +65,9 @@ def read_data(path, model_key_getter, config): labels = [meaning[label] for label in classes] + print('meaning',meaning) + print('retrive_meaning',retrive_meaning) + return labels, videoName, data, retrive_meaning @@ -115,7 +136,7 @@ def gendata(data_path, out_path, model_key_getter, part='train', config=1): part = "train" print(out_path,'->', part) - data_path = f'../../../ConnectingPoints/split/{dataset}--{kp_model}-Train.hdf5' + data_path = f'../../../../joe/ConnectingPoints/split/{dataset}--{kp_model}-Train.hdf5' gendata(data_path, out_path, model_key_getter[kp_model], part=part, config=numPoints) diff --git a/SL-GCN/graph/sign_27.py b/SL-GCN/graph/sign_27.py index 16849d6..7259295 100644 --- a/SL-GCN/graph/sign_27.py +++ b/SL-GCN/graph/sign_27.py @@ -4,16 +4,7 @@ from graph import tools import pandas as pd -num_node = 29 # 29 or 71 -points = pd.read_csv(f"points_{num_node}.csv") -ori = points.origin -tar = points.tarjet -inward_ori_index = [(o,t) for o, t in zip(ori, tar)] - - -print("NUM OF NODES:", num_node) -self_link = [(i, i) for i in range(num_node)] ''' inward_ori_index = [(5, 6), (5, 7), @@ -43,25 +34,35 @@ ] ''' -inward = [(i - 1, j - 1) for (i, j) in inward_ori_index] -outward = [(j, i) for (i, j) in inward] -neighbor = 
inward + outward class Graph: - def __init__(self, labeling_mode='spatial'): - self.A = self.get_adjacency_matrix(labeling_mode) + def __init__(self, labeling_mode='spatial',num_node=29): self.num_node = num_node - self.self_link = self_link - self.inward = inward - self.outward = outward - self.neighbor = neighbor + #num_node = 29 # 29 or 71 + points = pd.read_csv(f"points_{self.num_node}.csv") + ori = points.origin + tar = points.tarjet + + self.inward_ori_index = [(o,t) for o, t in zip(ori, tar)] + + + self.self_link = [(i, i) for i in range(self.num_node)] + + self.inward = [(i - 1, j - 1) for (i, j) in self.inward_ori_index] + self.outward = [(j, i) for (i, j) in self.inward] + self.neighbor = self.inward + self.outward + + print("NUM OF NODES:", self.num_node) + + + self.A = self.get_adjacency_matrix(labeling_mode) def get_adjacency_matrix(self, labeling_mode=None): if labeling_mode is None: return self.A if labeling_mode == 'spatial': - A = tools.get_spatial_graph(num_node, self_link, inward, outward) + A = tools.get_spatial_graph(self.num_node, self.self_link, self.inward, self.outward) else: raise ValueError() return A diff --git a/SL-GCN/main.py b/SL-GCN/main.py index f010751..15d3823 100644 --- a/SL-GCN/main.py +++ b/SL-GCN/main.py @@ -28,6 +28,8 @@ import torch.nn.functional as F import wandbFunctions as wandbF import wandb +import time +from data_gen.getConnectingPoint import * wandbFlag = True @@ -45,6 +47,32 @@ model_name = '' +def create_one_folder(directory): + if not os.path.exists(directory): + os.makedirs(directory) + +def create_folder(directory): + path = directory.split('/') + total_path ='' + for i in path: + total_path = os.path.join(total_path,i) + print(i, ' create : ',total_path) + create_one_folder(total_path) + + print('directory : ',directory) + create_one_folder(directory) + create_one_folder(directory+'/') + + create_one_folder(directory) + create_one_folder(directory+'/ga') + time.sleep(2) + + create_one_folder(directory) + create_one_folder(directory+'/ga') + + time.sleep(5) + + print('created paths') def init_seed(_): torch.cuda.manual_seed_all(1) @@ -58,144 +86,66 @@ def init_seed(_): def get_parser(): # parameter priority: command line > config > default - parser = argparse.ArgumentParser( - description='Decoupling Graph Convolution Network with DropGraph Module') - parser.add_argument( - '--work-dir', - default='./work_dir/temp', - help='the work folder for storing results') - - parser.add_argument('-model_saved_name', default='') - parser.add_argument('-Experiment_name', default='') - parser.add_argument( - '--config', - default='./config/nturgbd-cross-view/test_bone.yaml', - help='path to the configuration file') + parser = argparse.ArgumentParser(description='Decoupling Graph Convolution Network with DropGraph Module') + parser.add_argument('--work-dir',default='./work_dir/temp',help='the work folder for storing results') + + parser.add_argument('-model_saved_directory', default='') + parser.add_argument('-experiment_name', default='') + parser.add_argument('--config',default='./config/nturgbd-cross-view/test_bone.yaml',help='path to the configuration file') # processor - parser.add_argument( - '--phase', default='train', help='must be train or test') - parser.add_argument( - '--save-score', - type=str2bool, - default=False, - help='if ture, the classification score will be stored') + parser.add_argument('--phase', default='train', help='must be train or test') + parser.add_argument('--save-score',type=str2bool,default=False,help='if ture, the 
classification score will be stored') # visulize and debug - parser.add_argument( - '--seed', type=int, default=1, help='random seed for pytorch') - parser.add_argument( - '--log-interval', - type=int, - default=100, - help='the interval for printing messages (#iteration)') - parser.add_argument( - '--save-interval', - type=int, - default=2, - help='the interval for storing models (#iteration)') - parser.add_argument( - '--eval-interval', - type=int, - default=5, - help='the interval for evaluating models (#iteration)') - parser.add_argument( - '--print-log', - type=str2bool, - default=True, - help='print logging or not') - parser.add_argument( - '--show-topk', - type=int, - default=[1, 5], - nargs='+', - help='which Top K accuracy will be shown') + parser.add_argument('--seed', type=int, default=1, help='random seed for pytorch') + parser.add_argument('--log-interval',type=int,default=100,help='the interval for printing messages (#iteration)') + parser.add_argument('--save-interval',type=int,default=2,help='the interval for storing models (#iteration)') + parser.add_argument('--eval-interval',type=int,default=5,help='the interval for evaluating models (#iteration)') + parser.add_argument('--print-log',type=str2bool,default=True,help='print logging or not') + parser.add_argument('--show-topk',type=int,default=[1, 5],nargs='+',help='which Top K accuracy will be shown') # feeder - parser.add_argument( - '--feeder', default='feeder.feeder', help='data loader will be used') - parser.add_argument( - '--num-worker', - type=int, - default=32, - help='the number of worker for data loader') - parser.add_argument( - '--train-feeder-args', - default=dict(), - help='the arguments of data loader for training') - parser.add_argument( - '--test-feeder-args', - default=dict(), - help='the arguments of data loader for test') + parser.add_argument('--feeder', default='feeder.feeder', help='data loader will be used') + parser.add_argument('--num-worker',type=int,default=32,help='the number of worker for data loader') + parser.add_argument('--train-feeder-args',default=dict(),help='the arguments of data loader for training') + parser.add_argument('--test-feeder-args',default=dict(),help='the arguments of data loader for test') # model parser.add_argument('--model', default=None, help='the model will be used') - parser.add_argument( - '--model-args', - type=dict, - default=dict(), - help='the arguments of model') - parser.add_argument( - '--weights', - default=None, - help='the weights for network initialization') - parser.add_argument( - '--ignore-weights', - type=str, - default=[], - nargs='+', - help='the name of weights which will be ignored in the initialization') + parser.add_argument('--model-args',type=dict,default=dict(),help='the arguments of model') + parser.add_argument('--weights',default=None,help='the weights for network initialization') + parser.add_argument('--ignore-weights',type=str,default=[],nargs='+',help='the name of weights which will be ignored in the initialization') # optim - parser.add_argument( - '--base-lr', type=float, default=0.01, help='initial learning rate') - parser.add_argument( - '--step', - type=int, - default=[20, 40, 60], - nargs='+', - help='the epoch where optimizer reduce the learning rate') - parser.add_argument( - '--device', - type=int, - default=0, - nargs='+', - help='the indexes of GPUs for training or testing') + parser.add_argument('--base_lr', type=float, default=0.05, help='initial learning rate') + 
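As a side note to the graph/sign_27.py hunk above: the dynamic graph that the new command-line options of this parser feed into can be exercised on its own. The snippet below is a condensed sketch by the editor, not part of any patch in this series; it assumes points_29.csv (added earlier in the series) sits in the working directory, and it stops before the repository's graph.tools.get_spatial_graph call, which builds the final adjacency tensor.

```python
# Condensed sketch of the dynamic graph construction from graph/sign_27.py.
# Assumes points_29.csv from this patch series is in the working directory;
# only pandas is needed for this part.
import pandas as pd

num_node = 29  # 29 or 71, must match the points_{num_node}.csv file
points = pd.read_csv(f"points_{num_node}.csv")

# 'origin' and 'tarjet' hold 1-based indices, hence the (i - 1, j - 1) shift.
inward_ori_index = list(zip(points.origin, points.tarjet))
self_link = [(i, i) for i in range(num_node)]
inward = [(i - 1, j - 1) for (i, j) in inward_ori_index]
outward = [(j, i) for (i, j) in inward]
neighbor = inward + outward

print("NUM OF NODES:", num_node)
print("directed edges:", len(neighbor))
# graph/sign_27.py then finishes with:
# A = tools.get_spatial_graph(num_node, self_link, inward, outward)
```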
parser.add_argument('--num_epoch',type=int,default=500,help='stop training in which epoch') + + parser.add_argument('--step',type=int,default=[20, 40, 60],nargs='+',help='the epoch where optimizer reduce the learning rate') + parser.add_argument('--device',type=int,default=0,nargs='+',help='the indexes of GPUs for training or testing') parser.add_argument('--optimizer', default='SGD', help='type of optimizer') - parser.add_argument( - '--nesterov', type=str2bool, default=False, help='use nesterov or not') - parser.add_argument( - '--batch-size', type=int, default=256, help='training batch size') - parser.add_argument( - '--test-batch-size', type=int, default=256, help='test batch size') - parser.add_argument( - '--start-epoch', - type=int, - default=0, - help='start training from which epoch') - parser.add_argument( - '--num-epoch', - type=int, - default=80, - help='stop training in which epoch') - parser.add_argument( - '--weight-decay', - type=float, - default=0.0005, - help='weight decay for optimizer') - parser.add_argument( - '--keep_rate', - type=float, - default=0.9, - help='keep probability for drop') - parser.add_argument( - '--groups', - type=int, - default=8, - help='decouple groups') + parser.add_argument('--nesterov', type=str2bool, default=False, help='use nesterov or not') + parser.add_argument('--batch-size', type=int, default=32, help='training batch size') + parser.add_argument('--test-batch-size', type=int, default=256, help='test batch size') + parser.add_argument('--start-epoch',type=int,default=0,help='start training from which epoch') + parser.add_argument('--weight-decay',type=float,default=0.0001,help='weight decay for optimizer') + parser.add_argument('--keep_rate',type=float,default=0.9,help='keep probability for drop') + parser.add_argument('--groups',type=int,default=8,help='decouple groups') parser.add_argument('--only_train_part', default=True) parser.add_argument('--only_train_epoch', default=0) parser.add_argument('--warm_up_epoch', default=0) + + # Data + + parser.add_argument("--experiment_name", type=str, default="", help="Path to the training dataset CSV file") + parser.add_argument("--training_set_path", type=str, default="", help="Path to the training dataset CSV file") + parser.add_argument("--keypoints_model", type=str, default="openpose", help="Path to the training dataset CSV file") + parser.add_argument("--keypoints_number", type=int, default=29, help="Path to the training dataset CSV file") + parser.add_argument("--testing_set_path", type=str, default="", help="Path to the testing dataset CSV file") + parser.add_argument("--num_class", type=int, default="", help="Path to the testing dataset CSV file") + parser.add_argument("--database", type=str, default="", help="Path to the testing dataset CSV file") + parser.add_argument("--mode_train", type=str, default="train", help="Path to the testing dataset CSV file") + return parser @@ -206,22 +156,26 @@ class Processor(): def __init__(self, arg): - arg.model_saved_name = arg.file_name + '/' + arg.Experiment_name - arg.work_dir = "./work_dir/" + arg.Experiment_name + self.arg = arg self.save_arg() + self.connectingPoints(arg) + + if arg.phase == 'train': + pass + ''' if not arg.train_feeder_args['debug']: - if os.path.isdir(arg.model_saved_name): - print('log_dir: ', arg.model_saved_name, 'already exist') - answer = input('delete it? y/n:') + if os.path.isdir(arg.model_saved_directory): + print('log_dir: ', arg.model_saved_directory, 'already exist') + answer = 'y'#input('delete it? 
y/n:') if answer == 'y': - shutil.rmtree(arg.model_saved_name) - print('Dir removed: ', arg.model_saved_name) - input( - 'Refresh the website of tensorboard by pressing any keys') + shutil.rmtree(arg.model_saved_directory) + print('Dir removed: ', arg.model_saved_directory) + #input('Refresh the website of tensorboard by pressing any keys') else: - print('Dir not removed: ', arg.model_saved_name) + print('Dir not removed: ', arg.model_saved_directory) + ''' self.global_step = 0 self.load_model() @@ -231,6 +185,37 @@ def __init__(self, arg): self.best_acc = 0 self.best_tmp_acc = 0 + + def connectingPoints(self,arg): + print('Creating points .. ') + + folderName= '1' # just used to create folder "1" in data/sign/1/ + out_folder='data/sign/' + out_path = os.path.join(out_folder, folderName) + + kp_model = arg.kp_model# 'wholepose' # openpose wholepose mediapipe + dataset = arg.experiment_name# "PUCP" # WLASL PUCP_PSL_DGI156 AEC + numPoints = arg.keypoints_number # number of points used, need to be: 29 or 71 + data_path_train = arg.training_set_path #f'../../../../joe/ConnectingPoints/split/WLASL--wholepose-Train.hdf5' + data_path_test = arg.testing_set_path#f'../../../../joe/ConnectingPoints/split/WLASL--wholepose-Val.hdf5' + + + model_key_getter = {'mediapipe': get_mp_keys, + 'openpose': get_op_keys, + 'wholepose': get_wp_keys} + + if not os.path.exists(out_path): + os.makedirs(out_path) + + print('kp_model',kp_model) + print('\n',kp_model, dataset,'\n') + print(out_path,'->', 'train') + gendata(data_path_train, out_path, model_key_getter[kp_model], part='train', config=numPoints) + print(out_path,'->', 'val') + gendata(data_path_test, out_path, model_key_getter[kp_model], part='val', config=numPoints) + print('Creating points completed!!! ') + + def load_data(self): Feeder = import_class(self.arg.feeder) ln = Feeder(**self.arg.test_feeder_args) @@ -257,7 +242,14 @@ def load_model(self): output_device = self.arg.device[0] if type( self.arg.device) is list else self.arg.device self.output_device = output_device + + print('^'*20) + print('self.arg.model',self.arg.model) + print('model_args',self.arg.model_args) + Model = import_class(self.arg.model) + + shutil.copy2(inspect.getfile(Model), self.arg.work_dir) self.model = Model(**self.arg.model_args).cuda(output_device) # print(self.model) @@ -579,7 +571,7 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r str(x) + ',' + str(true[i]) + '\n') score = np.concatenate(score_frag) - if 'UCLA' in arg.Experiment_name: + if 'UCLA' in arg.experiment_name: self.data_loader[ln].dataset.sample_name = np.arange( len(score)) @@ -593,13 +585,34 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r zip(self.data_loader[ln].dataset.sample_name, score)) conf_mat = torchmetrics.ConfusionMatrix(num_classes=self.arg.model_args["num_class"]) + ''' + print('self.arg.model_args["num_class"]',self.arg.model_args["num_class"]) + + print('list(submission.values())',list(submission.values())) + print('set(list(submission.values()))',set(list(submission.values()))) + print('len(set(list(submission.values())))',len(set(list(submission.values())))) + + print('list(trueLabels.values())',list(trueLabels.values())) + print('set(list(trueLabels.values()))',set(list(trueLabels.values()))) + print('len(set(list(trueLabels.values())))',len(set(list(trueLabels.values())))) + ''' confusion_matrix = conf_mat(torch.tensor(list(submission.values())).cpu(), torch.tensor(list(trueLabels.values())).cpu()) confusion_matrix = 
confusion_matrix.detach().cpu().numpy() plt.figure(figsize = (10,7)) group_counts = ["{0:0.0f}".format(value) for value in confusion_matrix.flatten()] - confusion_matrix = np.asarray([line/np.sum(line) for line in confusion_matrix]) + ''' + print('confusion_matrix') + print(confusion_matrix) + print('len confusion_matrix') + + print(len(confusion_matrix)) + for line in confusion_matrix: + print('line',line) + print(len(line)) + ''' + confusion_matrix = np.asarray([line/(np.sum(line)+0.0001) for line in confusion_matrix]) confusion_matrix = np.nan_to_num(confusion_matrix) df_cm = pd.DataFrame(confusion_matrix * 100, index = meaning, columns=meaning) @@ -619,7 +632,15 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r if wandbFlag: wandb.log({"Confusion matrix": wandb.Image(fig_, caption="VAL_conf_mat")}) - with open('./work_dir/' + arg.Experiment_name + '/eval_results/'+ model_name+ '/best_acc' + '.pkl'.format( + + print('*'*20) + print('*'*20) + print('*'*20) + + print('./work_dir/' + arg.experiment_name + '/eval_results/'+ model_name+ '/best_acc' + '.pkl') + + + with open('./work_dir/' + arg.experiment_name + '/eval_results/'+ model_name+ '/best_acc' + '.pkl'.format( epoch, accuracy), 'wb') as f: pickle.dump(score_dict, f) @@ -627,7 +648,13 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r state_dict = self.model.state_dict() weights = OrderedDict([[k.split('module.')[-1], v.cpu()] for k, v in state_dict.items()]) - torch.save(weights, self.arg.model_saved_name + '-' + arg.kp_model + '-' + arg.database + "-Lr" + str(arg.base_lr) + "-NClasses" + str(arg.model_args["num_class"]) + '-' + str(accuracy) + '.pt') + + print('*'*20) + print('*'*20) + print('*'*20) + print(self.arg.model_saved_directory) + print(self.arg.model_saved_directory + '-' + arg.kp_model + '-' + arg.database + "-Lr" + str(arg.base_lr) + "-NClasses" + str(arg.model_args["num_class"]) + '-' + str(accuracy) + '.pt') + torch.save(weights, self.arg.model_saved_directory + '-' + arg.kp_model + '-' + arg.database + "-Lr" + str(arg.base_lr) + "-NClasses" + str(arg.model_args["num_class"]) + '-' + str(accuracy) + '.pt') if epoch + 1 == arg.num_epoch: @@ -651,7 +678,7 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r ''' print('Eval Accuracy: ', accuracy, - ' model: ', self.arg.model_saved_name) + ' model: ', self.arg.model_saved_directory) if wandbFlag: wandbF.wandbValLog(np.mean(loss_value), accuracy, top5) @@ -663,7 +690,7 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r self.print_log('\tTop{}: {:.2f}%'.format( k, 100 * self.data_loader[ln].dataset.top_k(score, k))) ''' - with open('./work_dir/' + arg.Experiment_name + '/eval_results/epoch_' + str(epoch) + '_' + str(accuracy) + '.pkl'.format( + with open('./work_dir/' + arg.experiment_name + '/eval_results/epoch_' + str(epoch) + '_' + str(accuracy) + '.pkl'.format( epoch, accuracy), 'wb') as f: pickle.dump(score_dict, f) ''' @@ -725,12 +752,12 @@ def start(self): # self.lr_scheduler.step(val_loss) print('best accuracy: ', self.best_acc, - ' model_name: ', self.arg.model_saved_name) + ' model_name: ', self.arg.model_saved_directory) elif self.arg.phase == 'test': if not self.arg.test_feeder_args['debug']: - wf = self.arg.model_saved_name + '_wrong.txt' - rf = self.arg.model_saved_name + '_right.txt' + wf = self.arg.model_saved_directory + '_wrong.txt' + rf = self.arg.model_saved_directory + '_right.txt' else: wf = rf = None if self.arg.weights 
is None: @@ -762,41 +789,63 @@ def import_class(name): if __name__ == '__main__': parser = get_parser() + + # load arg form config file + arg = parser.parse_args() - config = { - # - "num-epoch": 500, - "weight-decay": 0.0001, - "batch-size":32, - "base-lr": 0.05, - "kp-model":"mediapipe", - "database":"AEC", - - # This parameter is only used for wandb reports - not for the model - "num_points": 29 - } - - if wandbFlag: - wandb.init(project="Connecting-points", - entity="joenatan30", - config=config) - - config = wandb.config - # load arg form config file - p = parser.parse_args() - if p.config is not None: - with open(p.config, 'r') as f: + print('arg.config',arg.config) + if arg.config is not None: + with open(arg.config, 'r') as f: #default_arg = yaml.load(f) default_arg = yaml.safe_load(f) - key = vars(p).keys() + print('default_arg',default_arg) + key = vars(arg).keys() for k in default_arg.keys(): if k not in key: print('WRONG ARG: {}'.format(k)) assert (k in key) parser.set_defaults(**default_arg) + # load arg form config file arg = parser.parse_args() + + arg.model_args['num_class'] =arg.num_class + arg.model_args['num_point'] =arg.keypoints_number + + arg.model_args['graph_args']['num_node'] =arg.keypoints_number + + #num_class: 28 # AEC=28, PUCP=36 , WLASL=101 + #num_point: 29 # 29 or 71 + + # arg.training_set_path + # arg.keypoints_model + # arg.keypoints_number + # arg.testing_set_path + # arg.experiment_name + # arg.base_lr + # arg.num_epoch + + + config = { + # + "num-epoch": arg.num_epoch, + "weight-decay": arg.weight_decay, + "batch-size":arg.batch_size, + "base-lr": arg.base_lr, + "kp-model": arg.keypoints_model, + "num_points": arg.keypoints_number, + "database": arg.database, + "mode_train":arg.mode_train, + } + + if wandbFlag: + wandb.init(project="sign_language_project", + entity="ml_projects", + config=config) + + config = wandb.config + arg.base_lr = config["base-lr"] arg.batch_size = config["batch-size"] arg.weight_decay = config["weight-decay"] @@ -804,16 +853,35 @@ def import_class(name): arg.kp_model = config["kp-model"] arg.database = config["database"] - arg.file_name = f"./save_models/{arg.Experiment_name}{arg.model_saved_name}-{arg.kp_model}-{arg.database}-Lr{str(arg.base_lr)}-NClasses{str(arg.model_args['num_class'])}-{str(config['num_points'])}" - os.makedirs(arg.file_name,exist_ok=True) + arg.model_saved_directory = "save_models/"+arg.experiment_name+"/" + arg.work_dir = "work_dir/"+arg.experiment_name+"/" + + print('*'*20) + print('*'*20) + + print('model_saved_directory',arg.model_saved_directory) + print('work_dir',arg.work_dir) - runAndModelName = arg.kp_model + '-' + arg.database + "-LrnRate" + str(arg.base_lr)+ "-NClases" + str(arg.model_args["num_class"]) + "-Batch" + str(arg.batch_size) + + create_folder(arg.model_saved_directory) + create_folder(arg.work_dir) + create_folder('./work_dir/' + arg.experiment_name + '/eval_results/'+ model_name+ '/') + + # {arg.model_saved_directory}-{arg.kp_model}-{arg.database}-Lr{str(arg.base_lr)}-NClasses{str(arg.num_class)}-{str(config['num_points'])} + #os.makedirs(arg.file_name,exist_ok=True) + + runAndModelName = arg.kp_model + '-' + arg.database +'-'+str(arg.keypoints_number)+ "-LrnRate" + str(arg.base_lr)+ "-NClases" + str(arg.num_class) + "-Batch" + str(arg.batch_size) model_name = runAndModelName + print('model_name : ',model_name) if wandbFlag: wandb.run.name = runAndModelName wandb.run.save() init_seed(0) + + print(arg) + print(arg.train_feeder_args) + print('train_feeder_args',arg.train_feeder_args) 
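Before the Processor is created, __main__ has already copied the command-line values into model_args, as the lines above show. Because the help texts of the data-related flags added to get_parser() were copy-pasted from one another, the sketch below restates those flags with descriptive help strings and replays that wiring. It is an editor's illustration only: the help wording and the 0 default for --num_class (the patch uses an empty string) are assumptions; the flag names, types and remaining defaults mirror the patch.

```python
# Sketch: the data-related flags from get_parser(), with descriptive help
# texts (editor's wording, not from the patch), and the model_args wiring
# performed in __main__.
import argparse

parser = argparse.ArgumentParser(description='SL-GCN data/experiment options (sketch)')
parser.add_argument("--experiment_name", type=str, default="", help="name used for the save_models/ and work_dir/ output folders")
parser.add_argument("--training_set_path", type=str, default="", help="path to the ConnectingPoints training split (HDF5)")
parser.add_argument("--testing_set_path", type=str, default="", help="path to the ConnectingPoints validation split (HDF5)")
parser.add_argument("--keypoints_model", type=str, default="openpose", help="keypoint estimator: openpose, wholepose or mediapipe")
parser.add_argument("--keypoints_number", type=int, default=29, help="number of keypoints per frame: 29 or 71")
parser.add_argument("--num_class", type=int, default=0, help="number of sign classes in the chosen dataset")
parser.add_argument("--database", type=str, default="", help="dataset label used in run and file names (AEC, PUCP, WLASL)")
parser.add_argument("--mode_train", type=str, default="train", help="run label used for reporting")

args = parser.parse_args([])  # empty list: parse the defaults for illustration

# __main__ then copies these values into the model configuration:
model_args = {"graph_args": {}}
model_args["num_class"] = args.num_class
model_args["num_point"] = args.keypoints_number
model_args["graph_args"]["num_node"] = args.keypoints_number
print(model_args)
```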
processor = Processor(arg) processor.start() diff --git a/SL-GCN/model/decouple_gcn_attn.py b/SL-GCN/model/decouple_gcn_attn.py index abf8b49..9f8bbae 100644 --- a/SL-GCN/model/decouple_gcn_attn.py +++ b/SL-GCN/model/decouple_gcn_attn.py @@ -230,6 +230,7 @@ def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_siz else: Graph = import_class(graph) self.graph = Graph(**graph_args) + self.graph.num_node = num_point A = self.graph.A self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) diff --git a/SL-GCN/readme.md b/SL-GCN/readme.md index cccaddd..851d5f4 100644 --- a/SL-GCN/readme.md +++ b/SL-GCN/readme.md @@ -3,7 +3,7 @@ ## To use with [Connecting Points](https://github.com/JoeNatan30/ConnectingPoints) repository ### Generate smile-lab data split (from the split of connecting points) and Smile-lab model variable preparation to train -1. Run "getConnectingPoint.py" in data_gen folder (Not forget to modify "kpModel", "numPoints" and "dataset" variable) +1. Run "data_gen/getConnectingPoint.py" in data_gen folder (Not forget to modify "kpModel", "numPoints" and "dataset" variable) 2. Modify "num_point", "num_class" and "device" variable of the yaml file "/config/sign/train/train_joint.yaml" as it is needed (same as setted in the previous step) diff --git a/SL-GCN/runModel.sh b/SL-GCN/runModel.sh index 62393c6..bb2df16 100644 --- a/SL-GCN/runModel.sh +++ b/SL-GCN/runModel.sh @@ -1 +1,55 @@ -python main.py --config config/sign/train/train_joint.yaml \ No newline at end of file +#python main.py --config config/sign/train/train_joint.yaml + #num_class: 28 # AEC=28, PUCP=29 , WLASL=86 + + #num_point: 29 # 29 or 71 + +''' +python main.py --experiment_name results/f_29/PUCP/cris_wholepose_PUCP --database PUCP --keypoints_model wholepose --base_lr 0.001 --num_class 29 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/PUCP/cris_mediapipe_PUCP --database PUCP --keypoints_model mediapipe --base_lr 0.001 --num_class 29 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml + +python main.py --experiment_name results/f_29/WLASL/cris_wholepose_WLASL --database WLASL --keypoints_model wholepose --base_lr 0.001 --num_class 86 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/WLASL--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --database WLASL --keypoints_model mediapipe --base_lr 0.001 --num_class 86 --keypoints_number 29 --num_epoch 5 --training_set_path 
../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/WLASL/cris_openpose_WLASL --database WLASL --keypoints_model openpose --base_lr 0.001 --num_class 86 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/WLASL--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml + + +python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --database AEC --keypoints_model wholepose --base_lr 0.001 --num_class 28 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/AEC/cris_mediapipe_AEC --database AEC --keypoints_model mediapipe --base_lr 0.001 --num_class 28 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/AEC--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/AEC/cris_openpose_AEC --database AEC --keypoints_model openpose --base_lr 0.001 --num_class 28 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/AEC--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml + + +python main.py --experiment_name results/f_71/PUCP/cris_wholepose_PUCP --database PUCP --keypoints_model wholepose --base_lr 0.001 --num_class 29 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/PUCP/cris_mediapipe_PUCP --database PUCP --keypoints_model mediapipe --base_lr 0.001 --num_class 29 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml + +python main.py --experiment_name results/f_71/WLASL/cris_wholepose_WLASL --database WLASL --keypoints_model wholepose --base_lr 0.001 --num_class 86 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/WLASL--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --database WLASL --keypoints_model mediapipe --base_lr 0.001 --num_class 86 --keypoints_number 71 --num_epoch 5 
--training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/WLASL/cris_openpose_WLASL --database WLASL --keypoints_model openpose --base_lr 0.001 --num_class 86 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/WLASL--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml + + +python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --database AEC --keypoints_model wholepose --base_lr 0.001 --num_class 28 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/AEC/cris_mediapipe_AEC --database AEC --keypoints_model mediapipe --base_lr 0.001 --num_class 28 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/AEC--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/AEC/cris_openpose_AEC --database AEC --keypoints_model openpose --base_lr 0.001 --num_class 28 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/AEC--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +''' + +########## tunning ###########33 + +''' +python main.py --experiment_name --mode_train tunning results/f_71/AEC/cris_wholepose_AEC --database AEC --keypoints_model wholepose --base_lr 0.001 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name --mode_train tunning results/f_71/AEC/cris_wholepose_AEC --database AEC --keypoints_model wholepose --base_lr 0.0005 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name --mode_train tunning results/f_71/AEC/cris_wholepose_AEC --database AEC --keypoints_model wholepose --base_lr 0.00025 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name --mode_train tunning results/f_71/AEC/cris_wholepose_AEC --database AEC --keypoints_model wholepose --base_lr 0.0001 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name --mode_train tunning results/f_71/AEC/cris_wholepose_AEC --database AEC --keypoints_model wholepose 
--base_lr 0.00005 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml + +python main.py --experiment_name --mode_train tunning results/f_71/WLASL/cris_mediapipe_WLASL --database WLASL --keypoints_model mediapipe --base_lr 0.001 --num_class 86 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name --mode_train tunning results/f_71/WLASL/cris_mediapipe_WLASL --database WLASL --keypoints_model mediapipe --base_lr 0.0005 --num_class 86 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name --mode_train tunning results/f_71/WLASL/cris_mediapipe_WLASL --database WLASL --keypoints_model mediapipe --base_lr 0.00025 --num_class 86 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name --mode_train tunning results/f_71/WLASL/cris_mediapipe_WLASL --database WLASL --keypoints_model mediapipe --base_lr 0.0001 --num_class 86 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name --mode_train tunning results/f_71/WLASL/cris_mediapipe_WLASL --database WLASL --keypoints_model mediapipe --base_lr 0.00005 --num_class 86 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml + +python main.py --experiment_name --mode_train tunning results/f_29/PUCP/cris_openpose_PUCP --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name --mode_train tunning results/f_29/PUCP/cris_openpose_PUCP --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name --mode_train tunning results/f_29/PUCP/cris_openpose_PUCP --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path 
../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name --mode_train tunning results/f_29/PUCP/cris_openpose_PUCP --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name --mode_train tunning results/f_29/PUCP/cris_openpose_PUCP --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +''' diff --git a/SL-GCN/save_models/.gitkeep b/SL-GCN/save_models/.gitkeep deleted file mode 100644 index e69de29..0000000 diff --git a/SL-GCN/wandbFunctions.py b/SL-GCN/wandbFunctions.py index e2bd19a..cb55e62 100644 --- a/SL-GCN/wandbFunctions.py +++ b/SL-GCN/wandbFunctions.py @@ -7,12 +7,12 @@ """ import wandb - +''' def initConfigWandb(num_layers, num_classes, batch_size, nEpoch, lrn_rate, hidden_size, dropout, weight_decay, epsilon): - wandb.init(project="smileLab-PSL", entity="joenatan30") + wandb.init(project="sign_language_project", entity="ml_projects") run = wandb.init() config = wandb.config @@ -26,6 +26,7 @@ def initConfigWandb(num_layers, num_classes, batch_size, config.dropout = dropout config["weight_decay"] = weight_decay config["epsilon"] = epsilon +''' def wandbTrainLog(trainLoss, TrainAcc): wandb.log({"Train loss": trainLoss, diff --git a/SL-GCN/work_dir/.gitkeep b/SL-GCN/work_dir/.gitkeep deleted file mode 100644 index e69de29..0000000 From 680ea715086a85c5a3aadcee4af9bdc61e3ebac0 Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Wed, 7 Sep 2022 03:26:49 +0000 Subject: [PATCH 15/56] automatizacion --- .gitignore | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.gitignore b/.gitignore index 510c73d..9032ca2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,13 @@ +data/ +save_models/ +work_dir/ +wandb/ + +SL-GCN/data/ +SL-GCN/save_models/ +SL-GCN/work_dir/ +SL-GCN/wandb/ + # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] From d6091f0f33c71510606eef103bbcf6064e253584 Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Wed, 7 Sep 2022 03:56:04 +0000 Subject: [PATCH 16/56] hyperparameter tunning 71 v1 --- SL-GCN/runModel.sh | 43 ++++++++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/SL-GCN/runModel.sh b/SL-GCN/runModel.sh index bb2df16..1417e08 100644 --- a/SL-GCN/runModel.sh +++ b/SL-GCN/runModel.sh @@ -32,24 +32,25 @@ python main.py --experiment_name results/f_71/AEC/cris_mediapipe_AEC --database python main.py --experiment_name results/f_71/AEC/cris_openpose_AEC --database AEC --keypoints_model openpose --base_lr 0.001 --num_class 28 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/AEC--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml ''' -########## tunning ###########33 - -''' -python main.py --experiment_name --mode_train tunning results/f_71/AEC/cris_wholepose_AEC --database AEC --keypoints_model 
wholepose --base_lr 0.001 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name --mode_train tunning results/f_71/AEC/cris_wholepose_AEC --database AEC --keypoints_model wholepose --base_lr 0.0005 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name --mode_train tunning results/f_71/AEC/cris_wholepose_AEC --database AEC --keypoints_model wholepose --base_lr 0.00025 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name --mode_train tunning results/f_71/AEC/cris_wholepose_AEC --database AEC --keypoints_model wholepose --base_lr 0.0001 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name --mode_train tunning results/f_71/AEC/cris_wholepose_AEC --database AEC --keypoints_model wholepose --base_lr 0.00005 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml - -python main.py --experiment_name --mode_train tunning results/f_71/WLASL/cris_mediapipe_WLASL --database WLASL --keypoints_model mediapipe --base_lr 0.001 --num_class 86 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name --mode_train tunning results/f_71/WLASL/cris_mediapipe_WLASL --database WLASL --keypoints_model mediapipe --base_lr 0.0005 --num_class 86 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name --mode_train tunning results/f_71/WLASL/cris_mediapipe_WLASL --database WLASL --keypoints_model mediapipe --base_lr 0.00025 --num_class 86 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name --mode_train tunning results/f_71/WLASL/cris_mediapipe_WLASL --database WLASL --keypoints_model mediapipe --base_lr 0.0001 --num_class 86 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config 
config/sign/train/train_joint.yaml -python main.py --experiment_name --mode_train tunning results/f_71/WLASL/cris_mediapipe_WLASL --database WLASL --keypoints_model mediapipe --base_lr 0.00005 --num_class 86 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml - -python main.py --experiment_name --mode_train tunning results/f_29/PUCP/cris_openpose_PUCP --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name --mode_train tunning results/f_29/PUCP/cris_openpose_PUCP --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name --mode_train tunning results/f_29/PUCP/cris_openpose_PUCP --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name --mode_train tunning results/f_29/PUCP/cris_openpose_PUCP --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name --mode_train tunning results/f_29/PUCP/cris_openpose_PUCP --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -''' +########## tunning ###########71 + +python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.05 --num_class 28 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.01 --num_class 28 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC 
--keypoints_model wholepose --base_lr 0.005 --num_class 28 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.001 --num_class 28 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.0005 --num_class 28 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.0001 --num_class 28 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml + +python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.05 --num_class 86 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.01 --num_class 86 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.005 --num_class 86 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.001 --num_class 86 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.0005 --num_class 86 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config 
config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.0001 --num_class 86 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml + +python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.05 --num_class 29 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.01 --num_class 29 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.005 --num_class 29 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.0005 --num_class 29 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.0001 --num_class 29 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml From 1c672a1791b21a401abb32d5a6a052e1634035ff Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Wed, 7 Sep 2022 04:14:34 +0000 Subject: [PATCH 17/56] running tunning 71 y 29 keypoints --- SL-GCN/runModel.sh | 68 +++++++++++++++++++++++++++++++--------------- 1 file changed, 46 insertions(+), 22 deletions(-) diff --git a/SL-GCN/runModel.sh b/SL-GCN/runModel.sh index 1417e08..3c7441c 100644 --- a/SL-GCN/runModel.sh +++ b/SL-GCN/runModel.sh @@ -32,25 +32,49 @@ python main.py --experiment_name results/f_71/AEC/cris_mediapipe_AEC 
--database python main.py --experiment_name results/f_71/AEC/cris_openpose_AEC --database AEC --keypoints_model openpose --base_lr 0.001 --num_class 28 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/AEC--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml ''' -########## tunning ###########71 - -python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.05 --num_class 28 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.01 --num_class 28 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.005 --num_class 28 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.001 --num_class 28 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.0005 --num_class 28 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.0001 --num_class 28 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml - -python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.05 --num_class 86 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.01 --num_class 86 --keypoints_number 71 --num_epoch 2 --training_set_path 
../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.005 --num_class 86 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.001 --num_class 86 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.0005 --num_class 86 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.0001 --num_class 86 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml - -python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.05 --num_class 29 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.01 --num_class 29 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.005 --num_class 29 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml 
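# --- editor's note (illustrative sketch, not part of the original patch) ---
# Each tuning block in this script repeats the same command once per learning
# rate. The commented-out loop below is an equivalent way to run one such
# block; the flags mirror the AEC/wholepose commands added in this patch, and
# the dataset paths and epoch count are assumed from those lines.
# for lr in 0.05 0.01 0.005 0.001 0.0005 0.0001; do
#     python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC \
#         --mode_train tunning --database AEC --keypoints_model wholepose \
#         --base_lr "$lr" --num_class 28 --keypoints_number 71 --num_epoch 500 \
#         --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 \
#         --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 \
#         --config config/sign/train/train_joint.yaml
# done
# ---------------------------------------------------------------------------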
-python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.0005 --num_class 29 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.0001 --num_class 29 --keypoints_number 71 --num_epoch 2 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +########## tunning ########### 71 + +python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.05 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.01 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.005 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.001 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.0005 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.0001 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml + +python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.05 --num_class 86 --keypoints_number 71 --num_epoch 500 
--training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.01 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.005 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.001 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.0005 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.0001 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml + +python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.05 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.01 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.005 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config 
config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.0005 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.0001 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml + +########## tunning ########### 29 + +python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.05 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.01 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.005 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.001 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.0005 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.0001 --num_class 
28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml + +python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.05 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.01 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.005 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.001 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.0005 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.0001 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml + +python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.05 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.01 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path 
../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.005 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.0005 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.0001 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml + From 1b463f5b051dafcce15486cfe4bdbc222a256224 Mon Sep 17 00:00:00 2001 From: Cristian Lazo Quispe <31221056+CristianLazoQuispe@users.noreply.github.com> Date: Sat, 17 Sep 2022 23:29:21 -0500 Subject: [PATCH 18/56] Update readme.md --- SL-GCN/readme.md | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/SL-GCN/readme.md b/SL-GCN/readme.md index 851d5f4..c9ba850 100644 --- a/SL-GCN/readme.md +++ b/SL-GCN/readme.md @@ -1,21 +1,4 @@ # Skeleton Based Sign Language Recognition -## To use with [Connecting Points](https://github.com/JoeNatan30/ConnectingPoints) repository -### Generate smile-lab data split (from the split of connecting points) and Smile-lab model variable preparation to train -1. Run "data_gen/getConnectingPoint.py" in data_gen folder (Not forget to modify "kpModel", "numPoints" and "dataset" variable) - -2. Modify "num_point", "num_class" and "device" variable of the yaml file "/config/sign/train/train_joint.yaml" as it is needed (same as setted in the previous step) - -3. Modify "num_node" variable in sign_27 - -4. Go to "if __name__ == '__main__':" section of main.py (in SL-GCN folder) and modify "config" paremeters - -5. 
run -``` -python main.py --config config/sign/train/train_joint.yaml -``` - -Note: if you don't have a wandb account, you need to set "wandbFlag" variable of "main.py" to False and modify the code to have reports - ---------------------------- + runModel.sh From cf9d4b5e9fc19cc8d0d5d6888727043a820c6cdf Mon Sep 17 00:00:00 2001 From: Chameleon Cloud User Date: Sun, 18 Sep 2022 04:58:51 +0000 Subject: [PATCH 19/56] mas val accuracy and top5 --- SL-GCN/main.py | 12 ++++- SL-GCN/runModel.sh | 110 +++++++++++++++------------------------ SL-GCN/wandbFunctions.py | 6 ++- 3 files changed, 57 insertions(+), 71 deletions(-) diff --git a/SL-GCN/main.py b/SL-GCN/main.py index 15d3823..46f3b75 100644 --- a/SL-GCN/main.py +++ b/SL-GCN/main.py @@ -185,6 +185,9 @@ def __init__(self, arg): self.best_acc = 0 self.best_tmp_acc = 0 + self.maxTestAcc = 0 + self.relative_maxtop5 = 0 + def connectingPoints(self,arg): print('Creating points .. ') @@ -680,7 +683,14 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r print('Eval Accuracy: ', accuracy, ' model: ', self.arg.model_saved_directory) if wandbFlag: - wandbF.wandbValLog(np.mean(loss_value), accuracy, top5) + + self.maxTestAcc = max(accuracy,self.maxTestAcc) + + if self.maxTestAcc == accuracy: + + self.relative_maxtop5 = top5 + + wandbF.wandbValLog(np.mean(loss_value), accuracy, top5,self.maxTestAcc,self.relative_maxtop5) score_dict = dict( zip(self.data_loader[ln].dataset.sample_name, score)) diff --git a/SL-GCN/runModel.sh b/SL-GCN/runModel.sh index 3c7441c..1302fb4 100644 --- a/SL-GCN/runModel.sh +++ b/SL-GCN/runModel.sh @@ -4,77 +4,51 @@ #num_point: 29 # 29 or 71 ''' -python main.py --experiment_name results/f_29/PUCP/cris_wholepose_PUCP --database PUCP --keypoints_model wholepose --base_lr 0.001 --num_class 29 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/PUCP/cris_mediapipe_PUCP --database PUCP --keypoints_model mediapipe --base_lr 0.001 --num_class 29 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml - -python main.py --experiment_name results/f_29/WLASL/cris_wholepose_WLASL --database WLASL --keypoints_model wholepose --base_lr 0.001 --num_class 86 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/WLASL--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --database WLASL --keypoints_model mediapipe --base_lr 0.001 --num_class 86 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 
--testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/WLASL/cris_openpose_WLASL --database WLASL --keypoints_model openpose --base_lr 0.001 --num_class 86 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/WLASL--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml - - -python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --database AEC --keypoints_model wholepose --base_lr 0.001 --num_class 28 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/AEC/cris_mediapipe_AEC --database AEC --keypoints_model mediapipe --base_lr 0.001 --num_class 28 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/AEC--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/AEC/cris_openpose_AEC --database AEC --keypoints_model openpose --base_lr 0.001 --num_class 28 --keypoints_number 29 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/AEC--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml - - -python main.py --experiment_name results/f_71/PUCP/cris_wholepose_PUCP --database PUCP --keypoints_model wholepose --base_lr 0.001 --num_class 29 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/PUCP/cris_mediapipe_PUCP --database PUCP --keypoints_model mediapipe --base_lr 0.001 --num_class 29 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml - -python main.py --experiment_name results/f_71/WLASL/cris_wholepose_WLASL --database WLASL --keypoints_model wholepose --base_lr 0.001 --num_class 86 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/WLASL--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --database WLASL --keypoints_model mediapipe --base_lr 0.001 --num_class 86 --keypoints_number 71 --num_epoch 5 --training_set_path 
../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/WLASL/cris_openpose_WLASL --database WLASL --keypoints_model openpose --base_lr 0.001 --num_class 86 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/WLASL--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml - - -python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --database AEC --keypoints_model wholepose --base_lr 0.001 --num_class 28 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/AEC/cris_mediapipe_AEC --database AEC --keypoints_model mediapipe --base_lr 0.001 --num_class 28 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/AEC--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/AEC/cris_openpose_AEC --database AEC --keypoints_model openpose --base_lr 0.001 --num_class 28 --keypoints_number 71 --num_epoch 5 --training_set_path ../../../cristian/dataset_original/AEC--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -''' - ########## tunning ########### 71 -python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.05 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.01 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.005 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.001 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.0005 --num_class 28 
--keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.0001 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml - -python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.05 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.01 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.005 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.001 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.0005 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.0001 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml - -python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.05 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config 
config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.01 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.005 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.0005 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.0001 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.05 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.01 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.005 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.001 --num_class 28 --keypoints_number 71 --num_epoch 500 
--training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.0005 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.0001 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml + +python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.05 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.01 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.005 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.001 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.0005 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.0001 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml + +python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.05 --num_class 29 --keypoints_number 71 --num_epoch 
500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.01 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.005 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.0005 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.0001 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml ########## tunning ########### 29 -python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.05 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.01 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.005 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name 
results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.001 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.0005 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.0001 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml - -python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.05 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.01 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.005 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.001 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.0005 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.0001 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path 
../../../cristian/dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml - -python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.05 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.01 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.005 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.0005 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.0001 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../cristian/dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.05 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.01 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py 
--experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.005 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.001 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.0005 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.0001 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml + +python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.05 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.01 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.005 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.001 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.0005 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py 
--experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.0001 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml + +python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.05 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.01 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.005 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.0005 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.0001 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml +''' +python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train new_server --database AEC --keypoints_model wholepose --base_lr 0.05 --num_class 28 --keypoints_number 71 --num_epoch 20 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml diff --git a/SL-GCN/wandbFunctions.py b/SL-GCN/wandbFunctions.py index cb55e62..aeb90ef 100644 --- a/SL-GCN/wandbFunctions.py +++ b/SL-GCN/wandbFunctions.py @@ -33,10 +33,12 @@ def wandbTrainLog(trainLoss, TrainAcc): "Train accuracy": TrainAcc }) -def wandbValLog(testLoss, TestAcc, top5): +def 
wandbValLog(testLoss, TestAcc, top5,maxTestAcc,relative_maxtop5): wandb.log({"Val Loss": testLoss, "Val accuracy": TestAcc, - "Val Top5 acc": top5}) + "Val Top5 acc": top5, + "Val max accu":maxTestAcc, + "Val rel max Top5 acc":relative_maxtop5}) def watch(model): wandb.watch(model) From d6d4534319fb99f0cfbb32823c8de8a07eccce62 Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Sun, 18 Sep 2022 05:26:04 +0000 Subject: [PATCH 20/56] seed --- SL-GCN/main.py | 16 +++++++++------- SL-GCN/runModel.sh | 2 +- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/SL-GCN/main.py b/SL-GCN/main.py index 46f3b75..5dddda9 100644 --- a/SL-GCN/main.py +++ b/SL-GCN/main.py @@ -74,11 +74,11 @@ def create_folder(directory): print('created paths') -def init_seed(_): - torch.cuda.manual_seed_all(1) - torch.manual_seed(1) - np.random.seed(1) - random.seed(1) +def init_seed(value_seed): + torch.cuda.manual_seed_all(value_seed) + torch.manual_seed(value_seed) + np.random.seed(value_seed) + random.seed(value_seed) #torch.backends.cudnn.enabled = False torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False @@ -847,6 +847,7 @@ def import_class(name): "num_points": arg.keypoints_number, "database": arg.database, "mode_train":arg.mode_train, + "seed":arg.seed } if wandbFlag: @@ -880,7 +881,7 @@ def import_class(name): # {arg.model_saved_directory}-{arg.kp_model}-{arg.database}-Lr{str(arg.base_lr)}-NClasses{str(arg.num_class)}-{str(config['num_points'])} #os.makedirs(arg.file_name,exist_ok=True) - runAndModelName = arg.kp_model + '-' + arg.database +'-'+str(arg.keypoints_number)+ "-LrnRate" + str(arg.base_lr)+ "-NClases" + str(arg.num_class) + "-Batch" + str(arg.batch_size) + runAndModelName = arg.kp_model + '-' + arg.database +'-'+str(arg.keypoints_number)+ "-Lr" + str(arg.base_lr)+ "-NClas" + str(arg.num_class) + "-Batch" + str(arg.batch_size)+"-Seed"+str(arg.seed) model_name = runAndModelName print('model_name : ',model_name) @@ -888,7 +889,8 @@ def import_class(name): wandb.run.name = runAndModelName wandb.run.save() - init_seed(0) + print('seed :',arg.seed) + init_seed(arg.seed) print(arg) print(arg.train_feeder_args) diff --git a/SL-GCN/runModel.sh b/SL-GCN/runModel.sh index 1302fb4..a4d1eb0 100644 --- a/SL-GCN/runModel.sh +++ b/SL-GCN/runModel.sh @@ -51,4 +51,4 @@ python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_tra python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.0001 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml ''' -python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train new_server --database AEC --keypoints_model wholepose --base_lr 0.05 --num_class 28 --keypoints_number 71 --num_epoch 20 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +python main.py --seed 84 --experiment_name results/f_71/AEC/cris_wholepose_AEC_seed_42 --mode_train new_server --database AEC --keypoints_model wholepose --base_lr 0.05 --num_class 28 --keypoints_number 71 --num_epoch 20 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path 
../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml From 0c11191e1938a77666dde1fc44d2a29bda751ba9 Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Mon, 19 Sep 2022 05:34:15 +0000 Subject: [PATCH 21/56] fundamenntacion1 --- SL-GCN/main.py | 35 ++++++++---- SL-GCN/runModel.sh | 134 ++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 156 insertions(+), 13 deletions(-) diff --git a/SL-GCN/main.py b/SL-GCN/main.py index 5dddda9..d0e19a0 100644 --- a/SL-GCN/main.py +++ b/SL-GCN/main.py @@ -62,16 +62,6 @@ def create_folder(directory): print('directory : ',directory) create_one_folder(directory) create_one_folder(directory+'/') - - create_one_folder(directory) - create_one_folder(directory+'/ga') - time.sleep(2) - - create_one_folder(directory) - create_one_folder(directory+'/ga') - - time.sleep(5) - print('created paths') def init_seed(value_seed): @@ -91,7 +81,7 @@ def get_parser(): parser.add_argument('-model_saved_directory', default='') parser.add_argument('-experiment_name', default='') - parser.add_argument('--config',default='./config/nturgbd-cross-view/test_bone.yaml',help='path to the configuration file') + parser.add_argument('--config',default='config/sign/train/train_joint.yaml',help='path to the configuration file') # processor parser.add_argument('--phase', default='train', help='must be train or test') @@ -142,7 +132,7 @@ def get_parser(): parser.add_argument("--keypoints_model", type=str, default="openpose", help="Path to the training dataset CSV file") parser.add_argument("--keypoints_number", type=int, default=29, help="Path to the training dataset CSV file") parser.add_argument("--testing_set_path", type=str, default="", help="Path to the testing dataset CSV file") - parser.add_argument("--num_class", type=int, default="", help="Path to the testing dataset CSV file") + parser.add_argument("--num_class", type=int, default=0, help="Path to the testing dataset CSV file") parser.add_argument("--database", type=str, default="", help="Path to the testing dataset CSV file") parser.add_argument("--mode_train", type=str, default="train", help="Path to the testing dataset CSV file") @@ -804,6 +794,7 @@ def import_class(name): arg = parser.parse_args() + print('arg.config',arg.config) if arg.config is not None: with open(arg.config, 'r') as f: @@ -820,6 +811,21 @@ def import_class(name): # load arg form config file arg = parser.parse_args() + arg.training_set_path = '../../../dataset_original/'+arg.database+'--'+arg.keypoints_model+'-Train.hdf5' + arg.testing_set_path = '../../../dataset_original/'+arg.database+'--'+arg.keypoints_model+'-Val.hdf5' + + if arg.database == 'AEC': + arg.num_class = 28 + + if arg.database == 'WLASL': + + arg.num_class = 86 + + if arg.database == 'PUCP': + arg.num_class = 29 + arg.training_set_path = '../../../dataset_original/PUCP_PSL_DGI156--'+arg.keypoints_model+'-Train.hdf5' + arg.testing_set_path = '../../../dataset_original/PUCP_PSL_DGI156--'+arg.keypoints_model+'-Val.hdf5' + arg.model_args['num_class'] =arg.num_class arg.model_args['num_point'] =arg.keypoints_number @@ -892,7 +898,12 @@ def import_class(name): print('seed :',arg.seed) init_seed(arg.seed) + + print("*"*30) + print("*"*30) print(arg) + print("*"*30) + print("*"*30) print(arg.train_feeder_args) print('train_feeder_args',arg.train_feeder_args) processor = Processor(arg) diff --git a/SL-GCN/runModel.sh b/SL-GCN/runModel.sh index a4d1eb0..dce2d0c 100644 --- a/SL-GCN/runModel.sh +++ b/SL-GCN/runModel.sh @@ -51,4 +51,136 @@ python 
main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_tra python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.0001 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml ''' -python main.py --seed 84 --experiment_name results/f_71/AEC/cris_wholepose_AEC_seed_42 --mode_train new_server --database AEC --keypoints_model wholepose --base_lr 0.05 --num_class 28 --keypoints_number 71 --num_epoch 20 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml +# AEC AEC PUCP PUCP WASL WASL +# 0.05 0.1 0.05 0.1 0.1 0.05 + +########### 5 ########### +### POINTS 71 ### +python main.py --seed 5 --experiment_name results/71/AEC/wholepose-AEC-s-42 --database AEC --keypoints_model wholepose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 5 --experiment_name results/71/AEC/mediapipe-AEC-s-42 --database AEC --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 5 --experiment_name results/71/AEC/openpose-AEC-s-42 --database AEC --keypoints_model openpose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 + +python main.py --seed 5 --experiment_name results/71/PUCP/wholepose-PUCP-s-42 --database PUCP --keypoints_model wholepose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 5 --experiment_name results/71/PUCP/mediapipe-PUCP-s-42 --database PUCP --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 5 --experiment_name results/71/PUCP/openpose-PUCP-s-42 --database PUCP --keypoints_model openpose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 + +python main.py --seed 5 --experiment_name results/71/WLASL/wholepose-WLASL-s-42 --database WLASL --keypoints_model wholepose --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 5 --experiment_name results/71/WLASL/mediapipe-WLASL-s-42 --database WLASL --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 5 --experiment_name results/71/WLASL/openpose-WLASL-s-42 --database WLASL --keypoints_model openpose --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +### POINTS 29 ### +python main.py --seed 5 --experiment_name results/29/AEC/wholepose-AEC-s-42 --database AEC --keypoints_model wholepose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 5 --experiment_name results/29/AEC/mediapipe-AEC-s-42 --database AEC --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 5 --experiment_name results/29/AEC/openpose-AEC-s-42 --database AEC --keypoints_model openpose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 + +python main.py --seed 5 --experiment_name results/29/PUCP/wholepose-PUCP-s-42 --database PUCP 
--keypoints_model wholepose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 5 --experiment_name results/29/PUCP/mediapipe-PUCP-s-42 --database PUCP --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 5 --experiment_name results/29/PUCP/openpose-PUCP-s-42 --database PUCP --keypoints_model openpose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 + +python main.py --seed 5 --experiment_name results/29/WLASL/wholepose-WLASL-s-42 --database WLASL --keypoints_model wholepose --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 5 --experiment_name results/29/WLASL/mediapipe-WLASL-s-42 --database WLASL --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 5 --experiment_name results/29/WLASL/openpose-WLASL-s-42 --database WLASL --keypoints_model openpose --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 + +########### 15 ########### +### POINTS 71 ### +python main.py --seed 15 --experiment_name results/71/AEC/wholepose-AEC-s-42 --database AEC --keypoints_model wholepose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 15 --experiment_name results/71/AEC/mediapipe-AEC-s-42 --database AEC --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 15 --experiment_name results/71/AEC/openpose-AEC-s-42 --database AEC --keypoints_model openpose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 + +python main.py --seed 15 --experiment_name results/71/PUCP/wholepose-PUCP-s-42 --database PUCP --keypoints_model wholepose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 15 --experiment_name results/71/PUCP/mediapipe-PUCP-s-42 --database PUCP --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 15 --experiment_name results/71/PUCP/openpose-PUCP-s-42 --database PUCP --keypoints_model openpose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 + +python main.py --seed 15 --experiment_name results/71/WLASL/wholepose-WLASL-s-42 --database WLASL --keypoints_model wholepose --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 15 --experiment_name results/71/WLASL/mediapipe-WLASL-s-42 --database WLASL --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 15 --experiment_name results/71/WLASL/openpose-WLASL-s-42 --database WLASL --keypoints_model openpose --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +### POINTS 29 ### +python main.py --seed 15 --experiment_name results/29/AEC/wholepose-AEC-s-42 --database AEC --keypoints_model wholepose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 15 --experiment_name results/29/AEC/mediapipe-AEC-s-42 --database AEC --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 15 --experiment_name results/29/AEC/openpose-AEC-s-42 --database AEC --keypoints_model openpose --base_lr 
0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 + +python main.py --seed 15 --experiment_name results/29/PUCP/wholepose-PUCP-s-42 --database PUCP --keypoints_model wholepose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 15 --experiment_name results/29/PUCP/mediapipe-PUCP-s-42 --database PUCP --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 15 --experiment_name results/29/PUCP/openpose-PUCP-s-42 --database PUCP --keypoints_model openpose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 + +python main.py --seed 15 --experiment_name results/29/WLASL/wholepose-WLASL-s-42 --database WLASL --keypoints_model wholepose --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 15 --experiment_name results/29/WLASL/mediapipe-WLASL-s-42 --database WLASL --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 15 --experiment_name results/29/WLASL/openpose-WLASL-s-42 --database WLASL --keypoints_model openpose --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 + +########### 25 ########### +### POINTS 71 ### +python main.py --seed 25 --experiment_name results/71/AEC/wholepose-AEC-s-42 --database AEC --keypoints_model wholepose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 25 --experiment_name results/71/AEC/mediapipe-AEC-s-42 --database AEC --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 25 --experiment_name results/71/AEC/openpose-AEC-s-42 --database AEC --keypoints_model openpose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 + +python main.py --seed 25 --experiment_name results/71/PUCP/wholepose-PUCP-s-42 --database PUCP --keypoints_model wholepose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 25 --experiment_name results/71/PUCP/mediapipe-PUCP-s-42 --database PUCP --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 25 --experiment_name results/71/PUCP/openpose-PUCP-s-42 --database PUCP --keypoints_model openpose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 + +python main.py --seed 25 --experiment_name results/71/WLASL/wholepose-WLASL-s-42 --database WLASL --keypoints_model wholepose --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 25 --experiment_name results/71/WLASL/mediapipe-WLASL-s-42 --database WLASL --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 25 --experiment_name results/71/WLASL/openpose-WLASL-s-42 --database WLASL --keypoints_model openpose --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +### POINTS 29 ### +python main.py --seed 25 --experiment_name results/29/AEC/wholepose-AEC-s-42 --database AEC --keypoints_model wholepose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 25 --experiment_name results/29/AEC/mediapipe-AEC-s-42 --database AEC --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 29 
--num_epoch 400 --mode_train sustentacion1 +python main.py --seed 25 --experiment_name results/29/AEC/openpose-AEC-s-42 --database AEC --keypoints_model openpose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 + +python main.py --seed 25 --experiment_name results/29/PUCP/wholepose-PUCP-s-42 --database PUCP --keypoints_model wholepose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 25 --experiment_name results/29/PUCP/mediapipe-PUCP-s-42 --database PUCP --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 25 --experiment_name results/29/PUCP/openpose-PUCP-s-42 --database PUCP --keypoints_model openpose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 + +python main.py --seed 25 --experiment_name results/29/WLASL/wholepose-WLASL-s-42 --database WLASL --keypoints_model wholepose --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 25 --experiment_name results/29/WLASL/mediapipe-WLASL-s-42 --database WLASL --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 25 --experiment_name results/29/WLASL/openpose-WLASL-s-42 --database WLASL --keypoints_model openpose --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 + +########### 35 ########### +### POINTS 71 ### +python main.py --seed 35 --experiment_name results/71/AEC/wholepose-AEC-s-42 --database AEC --keypoints_model wholepose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 35 --experiment_name results/71/AEC/mediapipe-AEC-s-42 --database AEC --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 35 --experiment_name results/71/AEC/openpose-AEC-s-42 --database AEC --keypoints_model openpose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 + +python main.py --seed 35 --experiment_name results/71/PUCP/wholepose-PUCP-s-42 --database PUCP --keypoints_model wholepose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 35 --experiment_name results/71/PUCP/mediapipe-PUCP-s-42 --database PUCP --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 35 --experiment_name results/71/PUCP/openpose-PUCP-s-42 --database PUCP --keypoints_model openpose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 + +python main.py --seed 35 --experiment_name results/71/WLASL/wholepose-WLASL-s-42 --database WLASL --keypoints_model wholepose --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 35 --experiment_name results/71/WLASL/mediapipe-WLASL-s-42 --database WLASL --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 35 --experiment_name results/71/WLASL/openpose-WLASL-s-42 --database WLASL --keypoints_model openpose --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +### POINTS 29 ### +python main.py --seed 35 --experiment_name results/29/AEC/wholepose-AEC-s-42 --database AEC --keypoints_model wholepose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train 
sustentacion1 +python main.py --seed 35 --experiment_name results/29/AEC/mediapipe-AEC-s-42 --database AEC --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 35 --experiment_name results/29/AEC/openpose-AEC-s-42 --database AEC --keypoints_model openpose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 + +python main.py --seed 35 --experiment_name results/29/PUCP/wholepose-PUCP-s-42 --database PUCP --keypoints_model wholepose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 35 --experiment_name results/29/PUCP/mediapipe-PUCP-s-42 --database PUCP --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 35 --experiment_name results/29/PUCP/openpose-PUCP-s-42 --database PUCP --keypoints_model openpose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 + +python main.py --seed 35 --experiment_name results/29/WLASL/wholepose-WLASL-s-42 --database WLASL --keypoints_model wholepose --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 35 --experiment_name results/29/WLASL/mediapipe-WLASL-s-42 --database WLASL --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 35 --experiment_name results/29/WLASL/openpose-WLASL-s-42 --database WLASL --keypoints_model openpose --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 + + +########### 45 ########### +### POINTS 71 ### +python main.py --seed 45 --experiment_name results/71/AEC/wholepose-AEC-s-42 --database AEC --keypoints_model wholepose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 45 --experiment_name results/71/AEC/mediapipe-AEC-s-42 --database AEC --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 45 --experiment_name results/71/AEC/openpose-AEC-s-42 --database AEC --keypoints_model openpose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 + +python main.py --seed 45 --experiment_name results/71/PUCP/wholepose-PUCP-s-42 --database PUCP --keypoints_model wholepose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 45 --experiment_name results/71/PUCP/mediapipe-PUCP-s-42 --database PUCP --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 45 --experiment_name results/71/PUCP/openpose-PUCP-s-42 --database PUCP --keypoints_model openpose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 + +python main.py --seed 45 --experiment_name results/71/WLASL/wholepose-WLASL-s-42 --database WLASL --keypoints_model wholepose --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 45 --experiment_name results/71/WLASL/mediapipe-WLASL-s-42 --database WLASL --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 45 --experiment_name results/71/WLASL/openpose-WLASL-s-42 --database WLASL --keypoints_model openpose --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +### POINTS 29 ### +python 
main.py --seed 45 --experiment_name results/29/AEC/wholepose-AEC-s-42 --database AEC --keypoints_model wholepose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 45 --experiment_name results/29/AEC/mediapipe-AEC-s-42 --database AEC --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 5 --experiment_name results/29/AEC/openpose-AEC-s-42 --database AEC --keypoints_model openpose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 + +python main.py --seed 45 --experiment_name results/29/PUCP/wholepose-PUCP-s-42 --database PUCP --keypoints_model wholepose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 45 --experiment_name results/29/PUCP/mediapipe-PUCP-s-42 --database PUCP --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 45 --experiment_name results/29/PUCP/openpose-PUCP-s-42 --database PUCP --keypoints_model openpose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 + +python main.py --seed 45 --experiment_name results/29/WLASL/wholepose-WLASL-s-42 --database WLASL --keypoints_model wholepose --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 45 --experiment_name results/29/WLASL/mediapipe-WLASL-s-42 --database WLASL --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +python main.py --seed 45 --experiment_name results/29/WLASL/openpose-WLASL-s-42 --database WLASL --keypoints_model openpose --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 From 099bbff443c8590c9028493735eb9f1e05f3ab93 Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Tue, 20 Sep 2022 02:39:25 +0000 Subject: [PATCH 22/56] run experiment --- SL-GCN/main.py | 367 +++++++++++++++++++++++++++++++-------------- SL-GCN/runModel.sh | 193 ++---------------------- 2 files changed, 268 insertions(+), 292 deletions(-) diff --git a/SL-GCN/main.py b/SL-GCN/main.py index d0e19a0..369df3a 100644 --- a/SL-GCN/main.py +++ b/SL-GCN/main.py @@ -56,13 +56,13 @@ def create_folder(directory): total_path ='' for i in path: total_path = os.path.join(total_path,i) - print(i, ' create : ',total_path) + #print(i, ' create : ',total_path) create_one_folder(total_path) - print('directory : ',directory) + #print('directory : ',directory) create_one_folder(directory) create_one_folder(directory+'/') - print('created paths') + #print('created paths') def init_seed(value_seed): torch.cuda.manual_seed_all(value_seed) @@ -249,15 +249,28 @@ def load_model(self): if wandbFlag: wandbF.watch(self.model) self.loss = nn.CrossEntropyLoss().cuda(output_device) + + path_model_init = os.path.join(arg.model_saved_directory,arg.kp_model + '-' + arg.database +'-'+str(arg.keypoints_number)+ "-"+str(arg.seed)+"-init.pt") + + self.print_log('%'*20) + self.print_log('path_model_init :') + self.print_log(path_model_init) + torch.save(self.model.state_dict(), path_model_init) + self.print_log('%'*20) + # self.loss = LabelSmoothingCrossEntropy().cuda(output_device) + + #self.slrt_model_wp.load_state_dict(self.slrt_model_op.state_dict()) + if self.arg.weights: self.print_log('Load weights from {}.'.format(self.arg.weights)) if '.pkl' in self.arg.weights: with open(self.arg.weights, 'r') as f: - weights = pickle.load(f) + 
weights = pickle.load(f) else: - weights = torch.load(self.arg.weights) + weights = torch.load(self.arg.weights) + self.print_log("weights readed!") weights = OrderedDict( [[k.split('module.')[-1], @@ -270,7 +283,10 @@ def load_model(self): self.print_log('Can Not Remove Weights: {}.'.format(w)) try: + self.print_log("load state dict weights") self.model.load_state_dict(weights) + self.print_log("load state dict weights completed!") + except: state = self.model.state_dict() diff = list(set(state.keys()).difference(set(weights.keys()))) @@ -390,6 +406,98 @@ def split_time(self): self.record_time() return split_time + def train_zero(self, epoch, save_model=False): + self.model.train(False) + loader = self.data_loader['train'] + loss_value = [] + predict_arr = [] + proba_arr = [] + target_arr = [] + + self.record_time() + + timer = dict(dataloader=0.001, model=0.001, statistics=0.001) + process = tqdm(loader) + meaning = list(self.meaning.values()) + + for batch_idx, (data, label, index, name) in enumerate(process): + + self.global_step += 1 + + label_tmp = label.cpu().numpy() + # get data + data = Variable(data.float().cuda(self.output_device), requires_grad=False) + label = Variable(label.long().cuda(self.output_device), requires_grad=False) + timer['dataloader'] += self.split_time() + + # forward + if epoch < 100: + keep_prob = -(1 - self.arg.keep_rate) / 100 * epoch + 1.0 + else: + keep_prob = self.arg.keep_rate + + output = self.model(data, keep_prob) + + if isinstance(output, tuple): + output, l1 = output + l1 = l1.mean() + else: + l1 = 0 + + #print('output',output) + #print('label',label) + loss = self.loss(output, label) + #print('loss',loss) + #for r,s in zip(name,label_tmp): + # meaning[s]= '_'.join(r.split('_')[:-1]) + + loss_value.append(loss.data.cpu().numpy()) + timer['model'] += self.split_time() + + value, predict_label = torch.max(output.data, 1) + + predict_arr.append(predict_label.cpu().numpy()) + target_arr.append(label.data.cpu().numpy()) + proba_arr.append(output.data.cpu().numpy()) + + acc = torch.mean((predict_label == label.data).float()) + + + if self.global_step % self.arg.log_interval == 0: + self.print_log( + '\tBatch({}/{}) done. 
Loss: {:.4f} lr:{:.6f}'.format( + batch_idx, len(loader), loss.data, self.lr)) + timer['statistics'] += self.split_time() + + predict_arr = np.concatenate(predict_arr) + target_arr = np.concatenate(target_arr) + proba_arr = np.concatenate(proba_arr) + accuracy = torch.mean((predict_label == label.data).float()) + if accuracy >= self.best_tmp_acc: + self.best_tmp_acc = accuracy + + if epoch+1 == arg.num_epoch: + if wandbFlag: + wandb.log({"TRAIN_conf_mat" : wandb.plot.confusion_matrix( + #probs=score, + #y_true=list(label.values()), + #preds=list(predict_label.values()), + y_true=list(target_arr), + preds=list(predict_arr), + class_names=meaning, + title="TRAIN_conf_mat")}) + + if wandbFlag: + mean_loss = np.mean(loss_value) + if mean_loss>10: + mean_loss = 10 + wandbF.wandbTrainLog(mean_loss, accuracy) + # statistics of time consumption and loss + proportion = { + k: '{:02d}%'.format(int(round(v * 100 / sum(timer.values())))) + for k, v in timer.items() + } + def train(self, epoch, save_model=False): self.model.train() @@ -491,7 +599,11 @@ def train(self, epoch, save_model=False): title="TRAIN_conf_mat")}) if wandbFlag: - wandbF.wandbTrainLog(np.mean(loss_value), accuracy) + mean_loss = np.mean(loss_value) + if mean_loss>10: + mean_loss = 10 + + wandbF.wandbTrainLog(mean_loss, accuracy) # statistics of time consumption and loss proportion = { k: '{:02d}%'.format(int(round(v * 100 / sum(timer.values())))) @@ -540,7 +652,10 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r else: l1 = 0 + #print('val output',output) + #print('val label',label) loss = self.loss(output, label) + #print('val loss',loss) score_frag.append(output.data.cpu().numpy()) loss_value.append(loss.data.cpu().numpy()) @@ -653,12 +768,14 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r if epoch + 1 == arg.num_epoch: if wandbFlag: - wandb.log({"roc" : wandb.plot.roc_curve( list(trueLabels.values()), score, \ - labels=meaning, classes_to_plot=None)}) - - wandb.log({"pr" : wandb.plot.pr_curve(list(trueLabels.values()), score, - labels=meaning, classes_to_plot=None)}) - + try: + wandb.log({"roc" : wandb.plot.roc_curve( list(trueLabels.values()), score, \ + labels=meaning, classes_to_plot=None)}) + + wandb.log({"pr" : wandb.plot.pr_curve(list(trueLabels.values()), score, + labels=meaning, classes_to_plot=None)}) + except: + pass #wandb.log({"val_sklearn_conf_mat": wandb.sklearn.plot_confusion_matrix(, # , meaning_3)}) ''' @@ -673,6 +790,9 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r print('Eval Accuracy: ', accuracy, ' model: ', self.arg.model_saved_directory) if wandbFlag: + mean_loss = np.mean(loss_value) + if mean_loss>10: + mean_loss = 10 self.maxTestAcc = max(accuracy,self.maxTestAcc) @@ -680,7 +800,7 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r self.relative_maxtop5 = top5 - wandbF.wandbValLog(np.mean(loss_value), accuracy, top5,self.maxTestAcc,self.relative_maxtop5) + wandbF.wandbValLog(mean_loss, accuracy, top5,self.maxTestAcc,self.relative_maxtop5) score_dict = dict( zip(self.data_loader[ln].dataset.sample_name, score)) @@ -738,7 +858,14 @@ def start(self): self.print_log('Parameters:\n{}\n'.format(str(vars(self.arg)))) self.global_step = self.arg.start_epoch * \ len(self.data_loader['train']) / self.arg.batch_size + + self.model.train(False) + self.train_zero(0, save_model=False) + val_loss = self.eval(0,save_score=self.arg.save_score,loader_name=['test']) + 
self.model.train(True) + for epoch in range(self.arg.start_epoch, self.arg.num_epoch): + save_model = ((epoch + 1) % self.arg.save_interval == 0) or ( epoch + 1 == self.arg.num_epoch) @@ -788,123 +915,133 @@ def import_class(name): if __name__ == '__main__': + + parser = get_parser() - - # load arg form config file arg = parser.parse_args() + print('seed :',arg.seed) + init_seed(arg.seed) + + for id_iteration in range(1): + + # load arg form config file + + + + print('arg.config',arg.config) + if arg.config is not None: + with open(arg.config, 'r') as f: + #default_arg = yaml.load(f) + default_arg = yaml.safe_load(f) + print('default_arg',default_arg) + key = vars(arg).keys() + for k in default_arg.keys(): + if k not in key: + print('WRONG ARG: {}'.format(k)) + assert (k in key) + parser.set_defaults(**default_arg) + + # load arg form config file + arg = parser.parse_args() + + arg.training_set_path = '../../../dataset_original/'+arg.database+'--'+arg.keypoints_model+'-Train.hdf5' + arg.testing_set_path = '../../../dataset_original/'+arg.database+'--'+arg.keypoints_model+'-Val.hdf5' + + if arg.database == 'AEC': + arg.num_class = 28 + if arg.database == 'WLASL': + arg.num_class = 86 + + if arg.database == 'PUCP': + arg.num_class = 29 + arg.training_set_path = '../../../dataset_original/PUCP_PSL_DGI156--'+arg.keypoints_model+'-Train.hdf5' + arg.testing_set_path = '../../../dataset_original/PUCP_PSL_DGI156--'+arg.keypoints_model+'-Val.hdf5' + + arg.model_args['num_class'] =arg.num_class + arg.model_args['num_point'] =arg.keypoints_number + + arg.model_args['graph_args']['num_node'] =arg.keypoints_number + + #num_class: 28 # AEC=28, PUCP=36 , WLASL=101 + #num_point: 29 # 29 or 71 + + # arg.training_set_path + # arg.keypoints_model + # arg.keypoints_number + # arg.testing_set_path + # arg.experiment_name + # arg.base_lr + # arg.num_epoch - print('arg.config',arg.config) - if arg.config is not None: - with open(arg.config, 'r') as f: - #default_arg = yaml.load(f) - default_arg = yaml.safe_load(f) - print('default_arg',default_arg) - key = vars(arg).keys() - for k in default_arg.keys(): - if k not in key: - print('WRONG ARG: {}'.format(k)) - assert (k in key) - parser.set_defaults(**default_arg) - # load arg form config file - arg = parser.parse_args() + config = { + # + "num-epoch": arg.num_epoch, + "weight-decay": arg.weight_decay, + "batch-size":arg.batch_size, + "base-lr": arg.base_lr, + "kp-model": arg.keypoints_model, + "num_points": arg.keypoints_number, + "database": arg.database, + "mode_train":arg.mode_train, + "seed":arg.seed, + "id_iteration":id_iteration, + } - arg.training_set_path = '../../../dataset_original/'+arg.database+'--'+arg.keypoints_model+'-Train.hdf5' - arg.testing_set_path = '../../../dataset_original/'+arg.database+'--'+arg.keypoints_model+'-Val.hdf5' + if wandbFlag: + wandb.init(project="sign_language_project", + entity="ml_projects", + config=config) - if arg.database == 'AEC': - arg.num_class = 28 + config = wandb.config + print('+'*10) + print('config :',config) + print('+'*10) + arg.base_lr = config["base-lr"] + arg.batch_size = config["batch-size"] + arg.weight_decay = config["weight-decay"] + arg.num_epoch = config["num-epoch"] + arg.kp_model = config["kp-model"] + arg.database = config["database"] - if arg.database == 'WLASL': + arg.model_saved_directory = "save_models/"+arg.experiment_name+"/" + arg.work_dir = "work_dir/"+arg.experiment_name+"/" - arg.num_class = 86 - - if arg.database == 'PUCP': - arg.num_class = 29 - arg.training_set_path = 
'../../../dataset_original/PUCP_PSL_DGI156--'+arg.keypoints_model+'-Train.hdf5' - arg.testing_set_path = '../../../dataset_original/PUCP_PSL_DGI156--'+arg.keypoints_model+'-Val.hdf5' + print('*'*20) + print('*'*20) - arg.model_args['num_class'] =arg.num_class - arg.model_args['num_point'] =arg.keypoints_number + print('model_saved_directory',arg.model_saved_directory) + print('work_dir',arg.work_dir) - arg.model_args['graph_args']['num_node'] =arg.keypoints_number - #num_class: 28 # AEC=28, PUCP=36 , WLASL=101 - #num_point: 29 # 29 or 71 + create_folder(arg.model_saved_directory) + create_folder(arg.work_dir) + create_folder('./work_dir/' + arg.experiment_name + '/eval_results/'+ model_name+ '/') - # arg.training_set_path - # arg.keypoints_model - # arg.keypoints_number - # arg.testing_set_path - # arg.experiment_name - # arg.base_lr - # arg.num_epoch + # {arg.model_saved_directory}-{arg.kp_model}-{arg.database}-Lr{str(arg.base_lr)}-NClasses{str(arg.num_class)}-{str(config['num_points'])} + #os.makedirs(arg.file_name,exist_ok=True) - - config = { - # - "num-epoch": arg.num_epoch, - "weight-decay": arg.weight_decay, - "batch-size":arg.batch_size, - "base-lr": arg.base_lr, - "kp-model": arg.keypoints_model, - "num_points": arg.keypoints_number, - "database": arg.database, - "mode_train":arg.mode_train, - "seed":arg.seed - } - - if wandbFlag: - wandb.init(project="sign_language_project", - entity="ml_projects", - config=config) - - config = wandb.config - - arg.base_lr = config["base-lr"] - arg.batch_size = config["batch-size"] - arg.weight_decay = config["weight-decay"] - arg.num_epoch = config["num-epoch"] - arg.kp_model = config["kp-model"] - arg.database = config["database"] - - arg.model_saved_directory = "save_models/"+arg.experiment_name+"/" - arg.work_dir = "work_dir/"+arg.experiment_name+"/" - - print('*'*20) - print('*'*20) - - print('model_saved_directory',arg.model_saved_directory) - print('work_dir',arg.work_dir) - - - create_folder(arg.model_saved_directory) - create_folder(arg.work_dir) - create_folder('./work_dir/' + arg.experiment_name + '/eval_results/'+ model_name+ '/') - - # {arg.model_saved_directory}-{arg.kp_model}-{arg.database}-Lr{str(arg.base_lr)}-NClasses{str(arg.num_class)}-{str(config['num_points'])} - #os.makedirs(arg.file_name,exist_ok=True) - - runAndModelName = arg.kp_model + '-' + arg.database +'-'+str(arg.keypoints_number)+ "-Lr" + str(arg.base_lr)+ "-NClas" + str(arg.num_class) + "-Batch" + str(arg.batch_size)+"-Seed"+str(arg.seed) - - model_name = runAndModelName - print('model_name : ',model_name) - if wandbFlag: - wandb.run.name = runAndModelName - wandb.run.save() + runAndModelName = arg.kp_model + '-' + arg.database +'-'+str(arg.keypoints_number)+ "-Lr" + str(arg.base_lr)+ "-NClas" + str(arg.num_class) + "-Batch" + str(arg.batch_size)+"-Seed"+str(arg.seed)+"-id"+str(id_iteration) + + model_name = runAndModelName + print('model_name : ',model_name) + if wandbFlag: + wandb.run.name = runAndModelName + wandb.run.save() - print('seed :',arg.seed) - init_seed(arg.seed) - print("*"*30) - print("*"*30) - print(arg) - print("*"*30) - print("*"*30) - print(arg.train_feeder_args) - print('train_feeder_args',arg.train_feeder_args) - processor = Processor(arg) - processor.start() + print("*"*30) + print("*"*30) + print(arg) + print("*"*30) + print("*"*30) + print(arg.train_feeder_args) + print('train_feeder_args',arg.train_feeder_args) + processor = Processor(arg) + processor.start() + if wandbFlag: + wandb.finish() + print("wandb finish") diff --git 
a/SL-GCN/runModel.sh b/SL-GCN/runModel.sh index dce2d0c..7f38994 100644 --- a/SL-GCN/runModel.sh +++ b/SL-GCN/runModel.sh @@ -1,186 +1,25 @@ -#python main.py --config config/sign/train/train_joint.yaml - #num_class: 28 # AEC=28, PUCP=29 , WLASL=86 - - #num_point: 29 # 29 or 71 - -''' -########## tunning ########### 71 - -python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.05 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.01 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.005 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.001 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.0005 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.0001 --num_class 28 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml - -python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.05 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.01 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database 
WLASL --keypoints_model mediapipe --base_lr 0.005 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.001 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.0005 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.0001 --num_class 86 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml - -python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.05 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.01 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.005 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.0005 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python 
main.py --experiment_name results/f_71/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.0001 --num_class 29 --keypoints_number 71 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml - -########## tunning ########### 29 - -python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.05 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.01 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.005 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.001 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.0005 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/AEC/cris_wholepose_AEC --mode_train tunning --database AEC --keypoints_model wholepose --base_lr 0.0001 --num_class 28 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/AEC--wholepose-Train.hdf5 --testing_set_path ../../../dataset_original/AEC--wholepose-Val.hdf5 --config config/sign/train/train_joint.yaml - -python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.05 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.01 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config 
config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.005 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.001 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.0005 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/WLASL/cris_mediapipe_WLASL --mode_train tunning --database WLASL --keypoints_model mediapipe --base_lr 0.0001 --num_class 86 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/WLASL--mediapipe-Train.hdf5 --testing_set_path ../../../dataset_original/WLASL--mediapipe-Val.hdf5 --config config/sign/train/train_joint.yaml - -python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.05 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.01 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.005 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.001 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.0005 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path 
../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml -python main.py --experiment_name results/f_29/PUCP/cris_openpose_PUCP --mode_train tunning --database PUCP --keypoints_model openpose --base_lr 0.0001 --num_class 29 --keypoints_number 29 --num_epoch 500 --training_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Train.hdf5 --testing_set_path ../../../dataset_original/PUCP_PSL_DGI156--openpose-Val.hdf5 --config config/sign/train/train_joint.yaml - -''' -# AEC AEC PUCP PUCP WASL WASL -# 0.05 0.1 0.05 0.1 0.1 0.05 - -########### 5 ########### -### POINTS 71 ### -python main.py --seed 5 --experiment_name results/71/AEC/wholepose-AEC-s-42 --database AEC --keypoints_model wholepose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 5 --experiment_name results/71/AEC/mediapipe-AEC-s-42 --database AEC --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 5 --experiment_name results/71/AEC/openpose-AEC-s-42 --database AEC --keypoints_model openpose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 - -python main.py --seed 5 --experiment_name results/71/PUCP/wholepose-PUCP-s-42 --database PUCP --keypoints_model wholepose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 5 --experiment_name results/71/PUCP/mediapipe-PUCP-s-42 --database PUCP --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 5 --experiment_name results/71/PUCP/openpose-PUCP-s-42 --database PUCP --keypoints_model openpose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 - -python main.py --seed 5 --experiment_name results/71/WLASL/wholepose-WLASL-s-42 --database WLASL --keypoints_model wholepose --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 5 --experiment_name results/71/WLASL/mediapipe-WLASL-s-42 --database WLASL --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 5 --experiment_name results/71/WLASL/openpose-WLASL-s-42 --database WLASL --keypoints_model openpose --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -### POINTS 29 ### -python main.py --seed 5 --experiment_name results/29/AEC/wholepose-AEC-s-42 --database AEC --keypoints_model wholepose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 5 --experiment_name results/29/AEC/mediapipe-AEC-s-42 --database AEC --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 5 --experiment_name results/29/AEC/openpose-AEC-s-42 --database AEC --keypoints_model openpose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 - -python main.py --seed 5 --experiment_name results/29/PUCP/wholepose-PUCP-s-42 --database PUCP --keypoints_model wholepose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 5 --experiment_name results/29/PUCP/mediapipe-PUCP-s-42 --database PUCP --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train 
sustentacion1 -python main.py --seed 5 --experiment_name results/29/PUCP/openpose-PUCP-s-42 --database PUCP --keypoints_model openpose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +#!/bin/bash -python main.py --seed 5 --experiment_name results/29/WLASL/wholepose-WLASL-s-42 --database WLASL --keypoints_model wholepose --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 5 --experiment_name results/29/WLASL/mediapipe-WLASL-s-42 --database WLASL --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 5 --experiment_name results/29/WLASL/openpose-WLASL-s-42 --database WLASL --keypoints_model openpose --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -########### 15 ########### -### POINTS 71 ### -python main.py --seed 15 --experiment_name results/71/AEC/wholepose-AEC-s-42 --database AEC --keypoints_model wholepose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 15 --experiment_name results/71/AEC/mediapipe-AEC-s-42 --database AEC --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 15 --experiment_name results/71/AEC/openpose-AEC-s-42 --database AEC --keypoints_model openpose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 15 --experiment_name results/71/PUCP/wholepose-PUCP-s-42 --database PUCP --keypoints_model wholepose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 15 --experiment_name results/71/PUCP/mediapipe-PUCP-s-42 --database PUCP --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 15 --experiment_name results/71/PUCP/openpose-PUCP-s-42 --database PUCP --keypoints_model openpose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 - -python main.py --seed 15 --experiment_name results/71/WLASL/wholepose-WLASL-s-42 --database WLASL --keypoints_model wholepose --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 15 --experiment_name results/71/WLASL/mediapipe-WLASL-s-42 --database WLASL --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 15 --experiment_name results/71/WLASL/openpose-WLASL-s-42 --database WLASL --keypoints_model openpose --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -### POINTS 29 ### -python main.py --seed 15 --experiment_name results/29/AEC/wholepose-AEC-s-42 --database AEC --keypoints_model wholepose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 15 --experiment_name results/29/AEC/mediapipe-AEC-s-42 --database AEC --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 15 --experiment_name results/29/AEC/openpose-AEC-s-42 --database AEC --keypoints_model openpose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 - -python main.py --seed 15 --experiment_name results/29/PUCP/wholepose-PUCP-s-42 --database PUCP --keypoints_model wholepose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python 
main.py --seed 15 --experiment_name results/29/PUCP/mediapipe-PUCP-s-42 --database PUCP --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 15 --experiment_name results/29/PUCP/openpose-PUCP-s-42 --database PUCP --keypoints_model openpose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 - -python main.py --seed 15 --experiment_name results/29/WLASL/wholepose-WLASL-s-42 --database WLASL --keypoints_model wholepose --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 15 --experiment_name results/29/WLASL/mediapipe-WLASL-s-42 --database WLASL --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 15 --experiment_name results/29/WLASL/openpose-WLASL-s-42 --database WLASL --keypoints_model openpose --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 - -########### 25 ########### -### POINTS 71 ### -python main.py --seed 25 --experiment_name results/71/AEC/wholepose-AEC-s-42 --database AEC --keypoints_model wholepose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 25 --experiment_name results/71/AEC/mediapipe-AEC-s-42 --database AEC --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 25 --experiment_name results/71/AEC/openpose-AEC-s-42 --database AEC --keypoints_model openpose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 - -python main.py --seed 25 --experiment_name results/71/PUCP/wholepose-PUCP-s-42 --database PUCP --keypoints_model wholepose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 25 --experiment_name results/71/PUCP/mediapipe-PUCP-s-42 --database PUCP --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 25 --experiment_name results/71/PUCP/openpose-PUCP-s-42 --database PUCP --keypoints_model openpose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 - -python main.py --seed 25 --experiment_name results/71/WLASL/wholepose-WLASL-s-42 --database WLASL --keypoints_model wholepose --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 25 --experiment_name results/71/WLASL/mediapipe-WLASL-s-42 --database WLASL --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 25 --experiment_name results/71/WLASL/openpose-WLASL-s-42 --database WLASL --keypoints_model openpose --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -### POINTS 29 ### -python main.py --seed 25 --experiment_name results/29/AEC/wholepose-AEC-s-42 --database AEC --keypoints_model wholepose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 25 --experiment_name results/29/AEC/mediapipe-AEC-s-42 --database AEC --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 25 --experiment_name results/29/AEC/openpose-AEC-s-42 --database AEC --keypoints_model openpose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 - -python main.py --seed 25 
--experiment_name results/29/PUCP/wholepose-PUCP-s-42 --database PUCP --keypoints_model wholepose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 25 --experiment_name results/29/PUCP/mediapipe-PUCP-s-42 --database PUCP --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 25 --experiment_name results/29/PUCP/openpose-PUCP-s-42 --database PUCP --keypoints_model openpose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 - -python main.py --seed 25 --experiment_name results/29/WLASL/wholepose-WLASL-s-42 --database WLASL --keypoints_model wholepose --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 25 --experiment_name results/29/WLASL/mediapipe-WLASL-s-42 --database WLASL --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 25 --experiment_name results/29/WLASL/openpose-WLASL-s-42 --database WLASL --keypoints_model openpose --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 - -########### 35 ########### -### POINTS 71 ### -python main.py --seed 35 --experiment_name results/71/AEC/wholepose-AEC-s-42 --database AEC --keypoints_model wholepose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 35 --experiment_name results/71/AEC/mediapipe-AEC-s-42 --database AEC --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 35 --experiment_name results/71/AEC/openpose-AEC-s-42 --database AEC --keypoints_model openpose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 - -python main.py --seed 35 --experiment_name results/71/PUCP/wholepose-PUCP-s-42 --database PUCP --keypoints_model wholepose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 35 --experiment_name results/71/PUCP/mediapipe-PUCP-s-42 --database PUCP --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 35 --experiment_name results/71/PUCP/openpose-PUCP-s-42 --database PUCP --keypoints_model openpose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 - -python main.py --seed 35 --experiment_name results/71/WLASL/wholepose-WLASL-s-42 --database WLASL --keypoints_model wholepose --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 35 --experiment_name results/71/WLASL/mediapipe-WLASL-s-42 --database WLASL --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 35 --experiment_name results/71/WLASL/openpose-WLASL-s-42 --database WLASL --keypoints_model openpose --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -### POINTS 29 ### -python main.py --seed 35 --experiment_name results/29/AEC/wholepose-AEC-s-42 --database AEC --keypoints_model wholepose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 35 --experiment_name results/29/AEC/mediapipe-AEC-s-42 --database AEC --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 35 --experiment_name 
results/29/AEC/openpose-AEC-s-42 --database AEC --keypoints_model openpose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 - -python main.py --seed 35 --experiment_name results/29/PUCP/wholepose-PUCP-s-42 --database PUCP --keypoints_model wholepose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 35 --experiment_name results/29/PUCP/mediapipe-PUCP-s-42 --database PUCP --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 35 --experiment_name results/29/PUCP/openpose-PUCP-s-42 --database PUCP --keypoints_model openpose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 - -python main.py --seed 35 --experiment_name results/29/WLASL/wholepose-WLASL-s-42 --database WLASL --keypoints_model wholepose --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 35 --experiment_name results/29/WLASL/mediapipe-WLASL-s-42 --database WLASL --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 35 --experiment_name results/29/WLASL/openpose-WLASL-s-42 --database WLASL --keypoints_model openpose --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +#python main.py --config config/sign/train/train_joint.yaml + #num_class: 28 # AEC=28, PUCP=29 , WLASL=86 + #num_point: 29 # 29 or 71 -########### 45 ########### -### POINTS 71 ### -python main.py --seed 45 --experiment_name results/71/AEC/wholepose-AEC-s-42 --database AEC --keypoints_model wholepose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 45 --experiment_name results/71/AEC/mediapipe-AEC-s-42 --database AEC --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 45 --experiment_name results/71/AEC/openpose-AEC-s-42 --database AEC --keypoints_model openpose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 45 --experiment_name results/71/PUCP/wholepose-PUCP-s-42 --database PUCP --keypoints_model wholepose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 45 --experiment_name results/71/PUCP/mediapipe-PUCP-s-42 --database PUCP --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 45 --experiment_name results/71/PUCP/openpose-PUCP-s-42 --database PUCP --keypoints_model openpose --base_lr 0.1 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 +declare -a points=(29 71 29 71 29 71) +declare -a lrs=(0.05 0.1 0.05 0.1 0.1 0.05) +declare -a datasets=("AEC" "AEC" "PUCP" "PUCP" "WLASL" "WLASL") -python main.py --seed 45 --experiment_name results/71/WLASL/wholepose-WLASL-s-42 --database WLASL --keypoints_model wholepose --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 45 --experiment_name results/71/WLASL/mediapipe-WLASL-s-42 --database WLASL --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 45 --experiment_name results/71/WLASL/openpose-WLASL-s-42 --database WLASL --keypoints_model openpose --base_lr 0.05 --keypoints_number 71 --num_epoch 400 --mode_train sustentacion1 -### POINTS 29 
### -python main.py --seed 45 --experiment_name results/29/AEC/wholepose-AEC-s-42 --database AEC --keypoints_model wholepose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 45 --experiment_name results/29/AEC/mediapipe-AEC-s-42 --database AEC --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 5 --experiment_name results/29/AEC/openpose-AEC-s-42 --database AEC --keypoints_model openpose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 45 --experiment_name results/29/PUCP/wholepose-PUCP-s-42 --database PUCP --keypoints_model wholepose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 45 --experiment_name results/29/PUCP/mediapipe-PUCP-s-42 --database PUCP --keypoints_model mediapipe --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 45 --experiment_name results/29/PUCP/openpose-PUCP-s-42 --database PUCP --keypoints_model openpose --base_lr 0.05 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 45 --experiment_name results/29/WLASL/wholepose-WLASL-s-42 --database WLASL --keypoints_model wholepose --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 45 --experiment_name results/29/WLASL/mediapipe-WLASL-s-42 --database WLASL --keypoints_model mediapipe --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 -python main.py --seed 45 --experiment_name results/29/WLASL/openpose-WLASL-s-42 --database WLASL --keypoints_model openpose --base_lr 0.1 --keypoints_number 29 --num_epoch 400 --mode_train sustentacion1 +for i in 5 15 25 35 45 +do + for j in 0 1 2 3 4 5 + do + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[J]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[J]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-AEC-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[J]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-AEC-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + done +done \ No newline at end of file From 2b7cad1e5bd7fbe8d2a8b3c6488c8800fc494893 Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Fri, 23 Sep 2022 03:43:14 +0000 Subject: [PATCH 23/56] simulacion fundamentacion 3 --- SL-GCN/config/sign/train/train_joint.yaml | 2 +- SL-GCN/runModel.sh | 12 ++++-------- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/SL-GCN/config/sign/train/train_joint.yaml b/SL-GCN/config/sign/train/train_joint.yaml index 1aec012..628b00d 100644 --- a/SL-GCN/config/sign/train/train_joint.yaml +++ b/SL-GCN/config/sign/train/train_joint.yaml @@ -42,7 +42,7 @@ base_lr: 0.1 
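
A note on the runModel.sh sweep loops shown above: bash evaluates indexed-array subscripts arithmetically and treats variable names case-sensitively, so an expression like `${datasets[J]}` with no `J` defined quietly resolves to index 0 instead of raising an error. A minimal stand-alone sketch (hypothetical array, for illustration only, not part of the repository):

```bash
#!/bin/bash
# Illustration of case-sensitive array subscripts: only the lowercase j is set.
declare -a datasets=("AEC" "PUCP" "WLASL")

for j in 0 1 2
do
    # ${datasets[j]} follows the loop variable; ${datasets[J]} always yields
    # "AEC", because the unset name J evaluates to 0 inside the subscript.
    echo "j=$j -> ${datasets[j]} | J -> ${datasets[J]}"
done
```
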
step: [150, 200] # training -device: [0, 1] +device: [0, 1 ,2 , 3] keep_rate: 0.9 only_train_epoch: 1 batch_size: 64 diff --git a/SL-GCN/runModel.sh b/SL-GCN/runModel.sh index 7f38994..7b643df 100644 --- a/SL-GCN/runModel.sh +++ b/SL-GCN/runModel.sh @@ -1,25 +1,21 @@ #!/bin/bash - - #python main.py --config config/sign/train/train_joint.yaml #num_class: 28 # AEC=28, PUCP=29 , WLASL=86 - #num_point: 29 # 29 or 71 - declare -a points=(29 71 29 71 29 71) declare -a lrs=(0.05 0.1 0.05 0.1 0.1 0.05) declare -a datasets=("AEC" "AEC" "PUCP" "PUCP" "WLASL" "WLASL") -for i in 5 15 25 35 45 +for i in 0 1 5 15 25 35 45 55 65 75 85 95 do for j in 0 1 2 3 4 5 do - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[J]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[J]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-AEC-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[J]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-AEC-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 500 --mode_train fundamentacion_3 + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 500 --mode_train fundamentacion_3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 500 --mode_train fundamentacion_3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" done done \ No newline at end of file From 5ca7c4a1b301614892dae2f51c29b8c42b83373b Mon Sep 17 00:00:00 2001 From: Chameleon Cloud User Date: Wed, 28 Sep 2022 02:07:40 +0000 Subject: [PATCH 24/56] fundamentacion v3 --- SL-GCN/config/sign/train/train_joint.yaml | 2 +- SL-GCN/requirements.txt | 11 +++++++++++ SL-GCN/runModel.sh | 17 +++++++++++++---- 3 files changed, 25 insertions(+), 5 deletions(-) create mode 100644 SL-GCN/requirements.txt diff --git a/SL-GCN/config/sign/train/train_joint.yaml b/SL-GCN/config/sign/train/train_joint.yaml index 628b00d..bb4dd0b 100644 --- a/SL-GCN/config/sign/train/train_joint.yaml +++ b/SL-GCN/config/sign/train/train_joint.yaml @@ -42,7 +42,7 @@ base_lr: 0.1 step: [150, 200] # training -device: [0, 1 ,2 , 3] +device: [0, 1,2,3 ] keep_rate: 
0.9 only_train_epoch: 1 batch_size: 64 diff --git a/SL-GCN/requirements.txt b/SL-GCN/requirements.txt new file mode 100644 index 0000000..7d1bbb8 --- /dev/null +++ b/SL-GCN/requirements.txt @@ -0,0 +1,11 @@ +pandas==1.1.5 +tqdm==4.54.1 +matplotlib +scikit-learn +opencv-python==4.5.5.64 +torch --extra-index-url https://download.pytorch.org/whl/cu113 +torchvision --extra-index-url https://download.pytorch.org/whl/cu113 +h5py +seaborn +torchmetrics +wandb==0.13.2 \ No newline at end of file diff --git a/SL-GCN/runModel.sh b/SL-GCN/runModel.sh index 7b643df..ad239af 100644 --- a/SL-GCN/runModel.sh +++ b/SL-GCN/runModel.sh @@ -8,14 +8,23 @@ declare -a points=(29 71 29 71 29 71) declare -a lrs=(0.05 0.1 0.05 0.1 0.1 0.05) declare -a datasets=("AEC" "AEC" "PUCP" "PUCP" "WLASL" "WLASL") +for i in 1 +do + for j in 5 + do + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion_3 + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion_3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion_3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + done +done -for i in 0 1 5 15 25 35 45 55 65 75 85 95 +for i in 5 15 25 35 45 55 65 75 85 95 do for j in 0 1 2 3 4 5 do - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 500 --mode_train fundamentacion_3 - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 500 --mode_train fundamentacion_3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 500 --mode_train fundamentacion_3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion_3 + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} 
--keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion_3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion_3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" done done \ No newline at end of file From a1e4901a31d2fe0bff07ff65cee5ed5de1808fcd Mon Sep 17 00:00:00 2001 From: Cristian Lazo Quispe <31221056+CristianLazoQuispe@users.noreply.github.com> Date: Wed, 5 Oct 2022 22:08:24 -0500 Subject: [PATCH 25/56] Update readme.md --- SL-GCN/readme.md | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/SL-GCN/readme.md b/SL-GCN/readme.md index 851d5f4..3c7e09a 100644 --- a/SL-GCN/readme.md +++ b/SL-GCN/readme.md @@ -2,18 +2,12 @@ ## To use with [Connecting Points](https://github.com/JoeNatan30/ConnectingPoints) repository -### Generate smile-lab data split (from the split of connecting points) and Smile-lab model variable preparation to train -1. Run "data_gen/getConnectingPoint.py" in data_gen folder (Not forget to modify "kpModel", "numPoints" and "dataset" variable) +### -2. Modify "num_point", "num_class" and "device" variable of the yaml file "/config/sign/train/train_joint.yaml" as it is needed (same as setted in the previous step) - -3. Modify "num_node" variable in sign_27 - -4. Go to "if __name__ == '__main__':" section of main.py (in SL-GCN folder) and modify "config" paremeters - -5. run +1. run ``` -python main.py --config config/sign/train/train_joint.yaml +bash runModel.sh + ``` Note: if you don't have a wandb account, you need to set "wandbFlag" variable of "main.py" to False and modify the code to have reports From 216c8e4dba79c46520639ff908b92df515c03d7a Mon Sep 17 00:00:00 2001 From: Cristian Lazo Quispe <31221056+CristianLazoQuispe@users.noreply.github.com> Date: Wed, 5 Oct 2022 22:08:48 -0500 Subject: [PATCH 26/56] Update readme.md --- SL-GCN/readme.md | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/SL-GCN/readme.md b/SL-GCN/readme.md index 3c7e09a..3058488 100644 --- a/SL-GCN/readme.md +++ b/SL-GCN/readme.md @@ -2,10 +2,7 @@ ## To use with [Connecting Points](https://github.com/JoeNatan30/ConnectingPoints) repository -### - -1. 
run -``` + ``` bash runModel.sh ``` From 0d4060886f877f2e351ebb3c04605cc01e49f6e4 Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Fri, 7 Oct 2022 16:35:02 -0500 Subject: [PATCH 27/56] cambios neurips --- SL-GCN/main.py | 14 +++++--------- SL-GCN/runModel.sh | 10 ++++++---- 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/SL-GCN/main.py b/SL-GCN/main.py index 369df3a..c9f078b 100644 --- a/SL-GCN/main.py +++ b/SL-GCN/main.py @@ -2,9 +2,7 @@ from __future__ import print_function import argparse import os -from termios import VMIN import time -from xml.dom import minicompat import numpy as np import yaml import pickle @@ -17,15 +15,13 @@ from torch.autograd import Variable from tqdm import tqdm import shutil -from torch.optim.lr_scheduler import ReduceLROnPlateau, MultiStepLR +from torch.optim.lr_scheduler import ReduceLROnPlateau import random import inspect import torchmetrics import matplotlib.pyplot as plt import seaborn as sns import pandas as pd -import torch.backends.cudnn as cudnn -import torch.nn.functional as F import wandbFunctions as wandbF import wandb import time @@ -944,8 +940,8 @@ def import_class(name): # load arg form config file arg = parser.parse_args() - arg.training_set_path = '../../../dataset_original/'+arg.database+'--'+arg.keypoints_model+'-Train.hdf5' - arg.testing_set_path = '../../../dataset_original/'+arg.database+'--'+arg.keypoints_model+'-Val.hdf5' + arg.training_set_path = '../../../'+arg.database+'--'+arg.keypoints_model+'-Train.hdf5' + arg.testing_set_path = '../../../'+arg.database+'--'+arg.keypoints_model+'-Val.hdf5' if arg.database == 'AEC': arg.num_class = 28 @@ -956,8 +952,8 @@ def import_class(name): if arg.database == 'PUCP': arg.num_class = 29 - arg.training_set_path = '../../../dataset_original/PUCP_PSL_DGI156--'+arg.keypoints_model+'-Train.hdf5' - arg.testing_set_path = '../../../dataset_original/PUCP_PSL_DGI156--'+arg.keypoints_model+'-Val.hdf5' + arg.training_set_path = '../../../PUCP_PSL_DGI156--'+arg.keypoints_model+'-Train.hdf5' + arg.testing_set_path = '../../../PUCP_PSL_DGI156--'+arg.keypoints_model+'-Val.hdf5' arg.model_args['num_class'] =arg.num_class arg.model_args['num_point'] =arg.keypoints_number diff --git a/SL-GCN/runModel.sh b/SL-GCN/runModel.sh index ad239af..1d5473a 100644 --- a/SL-GCN/runModel.sh +++ b/SL-GCN/runModel.sh @@ -12,12 +12,13 @@ for i in 1 do for j in 5 do - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion_3 - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion_3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion_3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name 
"results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 4 --mode_train fundamentacion_3 + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 4 --mode_train fundamentacion_3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 4 --mode_train fundamentacion_3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" done done +""" for i in 5 15 25 35 45 55 65 75 85 95 do @@ -27,4 +28,5 @@ do python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion_3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion_3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" done -done \ No newline at end of file +done +""" From 54b17c1a1063161ef87f2e2a96296934720fa293 Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Fri, 7 Oct 2022 16:41:39 -0500 Subject: [PATCH 28/56] cambios neurips --- SL-GCN/main.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/SL-GCN/main.py b/SL-GCN/main.py index c9f078b..564d62f 100644 --- a/SL-GCN/main.py +++ b/SL-GCN/main.py @@ -985,10 +985,15 @@ def import_class(name): "seed":arg.seed, "id_iteration":id_iteration, } + import wandb + import os + + os.environ["WANDB_API_KEY"] = "15f7c99e787e3f99da09963b0cfb45b73656845f" if wandbFlag: wandb.init(project="sign_language_project", entity="ml_projects", + reinit=True, config=config) config = wandb.config From c5d0fffbd2132cca4d44a706ff841979d185cc59 Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Fri, 7 Oct 2022 16:47:56 -0500 Subject: [PATCH 29/56] # parameters --- SL-GCN/main.py | 7 ++++++- SL-GCN/wandbFunctions.py | 6 ++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/SL-GCN/main.py b/SL-GCN/main.py index 564d62f..7de6ef0 100644 --- a/SL-GCN/main.py +++ b/SL-GCN/main.py @@ -253,6 +253,11 @@ def load_model(self): self.print_log(path_model_init) torch.save(self.model.state_dict(), path_model_init) self.print_log('%'*20) + + self.m_params = sum(p.numel() for p in self.model.parameters()) + self.trainable_m_params= sum(p.numel() for p in self.model.parameters() if p.requires_grad) + + # self.loss = LabelSmoothingCrossEntropy().cuda(output_device) @@ -599,7 +604,7 @@ def train(self, epoch, save_model=False): if mean_loss>10: mean_loss = 10 - wandbF.wandbTrainLog(mean_loss, accuracy) + 
wandbF.wandbTrainLog(mean_loss, accuracy,self.m_params,self.trainable_m_params) # statistics of time consumption and loss proportion = { k: '{:02d}%'.format(int(round(v * 100 / sum(timer.values())))) diff --git a/SL-GCN/wandbFunctions.py b/SL-GCN/wandbFunctions.py index aeb90ef..4b33c6e 100644 --- a/SL-GCN/wandbFunctions.py +++ b/SL-GCN/wandbFunctions.py @@ -28,9 +28,11 @@ def initConfigWandb(num_layers, num_classes, batch_size, config["epsilon"] = epsilon ''' -def wandbTrainLog(trainLoss, TrainAcc): +def wandbTrainLog(trainLoss, TrainAcc,p1,p2): wandb.log({"Train loss": trainLoss, - "Train accuracy": TrainAcc + "Train accuracy": TrainAcc, + "m_params":p1, + "trainable_m_params":p2 }) def wandbValLog(testLoss, TestAcc, top5,maxTestAcc,relative_maxtop5): From 0b400574b3104ad6d09d02b645186dc9c2f19eab Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Fri, 7 Oct 2022 16:49:16 -0500 Subject: [PATCH 30/56] # parameters --- SL-GCN/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SL-GCN/main.py b/SL-GCN/main.py index 7de6ef0..1932c4a 100644 --- a/SL-GCN/main.py +++ b/SL-GCN/main.py @@ -492,7 +492,7 @@ def train_zero(self, epoch, save_model=False): mean_loss = np.mean(loss_value) if mean_loss>10: mean_loss = 10 - wandbF.wandbTrainLog(mean_loss, accuracy) + wandbF.wandbTrainLog(mean_loss, accuracy,self.m_params,self.trainable_m_params) # statistics of time consumption and loss proportion = { k: '{:02d}%'.format(int(round(v * 100 / sum(timer.values())))) From 48420caee193db0f096c061005c7ed69cfa9e321 Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Fri, 7 Oct 2022 16:51:00 -0500 Subject: [PATCH 31/56] # parameters --- SL-GCN/runModel.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/SL-GCN/runModel.sh b/SL-GCN/runModel.sh index 1d5473a..90c11d5 100644 --- a/SL-GCN/runModel.sh +++ b/SL-GCN/runModel.sh @@ -12,9 +12,9 @@ for i in 1 do for j in 5 do - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 4 --mode_train fundamentacion_3 - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 4 --mode_train fundamentacion_3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 4 --mode_train fundamentacion_3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 4 --mode_train numero_parametros + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 4 --mode_train numero_parametros --weights 
"save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 4 --mode_train numero_parametros --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" done done From 4d9fd241496f8c30dd85cc053b20ac346efc2caa Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Fri, 7 Oct 2022 16:53:06 -0500 Subject: [PATCH 32/56] # paramters --- SL-GCN/runModel.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/SL-GCN/runModel.sh b/SL-GCN/runModel.sh index 90c11d5..967501d 100644 --- a/SL-GCN/runModel.sh +++ b/SL-GCN/runModel.sh @@ -10,11 +10,11 @@ declare -a datasets=("AEC" "AEC" "PUCP" "PUCP" "WLASL" "WLASL") for i in 1 do - for j in 5 + for j in 0 1 2 3 4 5 do - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 4 --mode_train numero_parametros - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 4 --mode_train numero_parametros --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 4 --mode_train numero_parametros --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train numero_parametros + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train numero_parametros --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train numero_parametros --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" done done From 9c77036a76c48d41e95dea01a7aa6919141e0b8d Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Sun, 23 Oct 2022 14:48:55 -0500 Subject: [PATCH 33/56] 51 points --- SL-GCN/.~lock.points_29.csv# | 1 + SL-GCN/config/sign/train/train_joint.yaml | 8 ++-- SL-GCN/feeders/feeder.py | 3 +- SL-GCN/main.py | 8 ++-- SL-GCN/points_51.csv | 52 
+++++++++++++++++++++++ SL-GCN/runModelTest.sh | 21 +++++++++ 6 files changed, 85 insertions(+), 8 deletions(-) create mode 100644 SL-GCN/.~lock.points_29.csv# create mode 100644 SL-GCN/points_51.csv create mode 100644 SL-GCN/runModelTest.sh diff --git a/SL-GCN/.~lock.points_29.csv# b/SL-GCN/.~lock.points_29.csv# new file mode 100644 index 0000000..12a8f9d --- /dev/null +++ b/SL-GCN/.~lock.points_29.csv# @@ -0,0 +1 @@ +,cristian,cristian,23.10.2022 13:09,file:///home/cristian/.config/libreoffice/4; \ No newline at end of file diff --git a/SL-GCN/config/sign/train/train_joint.yaml b/SL-GCN/config/sign/train/train_joint.yaml index bb4dd0b..6049fe1 100644 --- a/SL-GCN/config/sign/train/train_joint.yaml +++ b/SL-GCN/config/sign/train/train_joint.yaml @@ -42,11 +42,13 @@ base_lr: 0.1 step: [150, 200] # training -device: [0, 1,2,3 ] +#device: [0, 1,2,3 ] + +device: [0] keep_rate: 0.9 only_train_epoch: 1 -batch_size: 64 -test_batch_size: 64 +batch_size: 8 +test_batch_size: 8 num_epoch: 250 nesterov: True warm_up_epoch: 20 diff --git a/SL-GCN/feeders/feeder.py b/SL-GCN/feeders/feeder.py index 6323253..bc3f9c6 100644 --- a/SL-GCN/feeders/feeder.py +++ b/SL-GCN/feeders/feeder.py @@ -9,6 +9,7 @@ # flip_index for 71 and 29 flip_index = {71:np.concatenate(([0,2,1,4,3,6,5,8,7,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30],[31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],[51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70]), axis=0), + 51:np.concatenate(([0,2,1,4,3,6,5,8,7,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30],[31,32,33,34,35,36,37,38,39,40],[41,42,43,44,45,46,47,48,49,50]), axis=0), 29:np.concatenate(([0,2,1,4,3,6,5,8,7],[9,10,11,12,13,14,15,16,17,18],[19,20,21,22,23,24,25,26,27,28]), axis=0)} class Feeder(Dataset): @@ -98,7 +99,7 @@ def __getitem__(self, index): if self.random_mirror: if random.random() > self.random_mirror_p: #print("dabe before random mirror", data_numpy) - assert data_numpy.shape[2] == 71 or data_numpy.shape[2] == 29 + assert data_numpy.shape[2] == 71 or data_numpy.shape[2] == 29 or data_numpy.shape[2] == 51 data_numpy = data_numpy[:,:,flip_index[data_numpy.shape[2]],:] if self.is_vector: data_numpy[0,:,:,:] = - data_numpy[0,:,:,:] diff --git a/SL-GCN/main.py b/SL-GCN/main.py index 1932c4a..7c97b4c 100644 --- a/SL-GCN/main.py +++ b/SL-GCN/main.py @@ -945,8 +945,8 @@ def import_class(name): # load arg form config file arg = parser.parse_args() - arg.training_set_path = '../../../'+arg.database+'--'+arg.keypoints_model+'-Train.hdf5' - arg.testing_set_path = '../../../'+arg.database+'--'+arg.keypoints_model+'-Val.hdf5' + arg.training_set_path = '../../DATASETS/'+arg.database+'--'+arg.keypoints_model+'-Train.hdf5' + arg.testing_set_path = '../../DATASETS/'+arg.database+'--'+arg.keypoints_model+'-Val.hdf5' if arg.database == 'AEC': arg.num_class = 28 @@ -957,8 +957,8 @@ def import_class(name): if arg.database == 'PUCP': arg.num_class = 29 - arg.training_set_path = '../../../PUCP_PSL_DGI156--'+arg.keypoints_model+'-Train.hdf5' - arg.testing_set_path = '../../../PUCP_PSL_DGI156--'+arg.keypoints_model+'-Val.hdf5' + arg.training_set_path = '../../DATASETS/PUCP_PSL_DGI156--'+arg.keypoints_model+'-Train.hdf5' + arg.testing_set_path = '../../DATASETS/PUCP_PSL_DGI156--'+arg.keypoints_model+'-Val.hdf5' arg.model_args['num_class'] =arg.num_class arg.model_args['num_point'] =arg.keypoints_number diff --git a/SL-GCN/points_51.csv b/SL-GCN/points_51.csv new file mode 100644 index 0000000..dbecc24 --- /dev/null +++ 
b/SL-GCN/points_51.csv @@ -0,0 +1,52 @@ +tar_name,ori_name,mp_pos,wb_pos,op_pos,origin,tarjet +pose_nose,pose_nose,1,1,1,1,1 +pose_left_eye,pose_nose,3,2,17,1,2 +pose_right_eye,pose_nose,6,3,16,1,3 +pose_left_shoulder,pose_nose,12,6,6,1,4 +pose_right_shoulder,pose_nose,13,7,3,1,5 +pose_left_elbow,pose_left_shoulder,14,8,7,4,6 +pose_right_elbow,pose_right_shoulder,15,9,4,5,7 +pose_left_wrist,pose_left_elbow,16,10,8,6,8 +pose_right_wrist,pose_right_elbow,17,11,5,7,9 +face_right_mouth_up,pose_nose,71,74,76,1,10 +face_right_eyebrow_inner,pose_nose,89,45,47,1,11 +face_right_mouth_corner,face_right_mouth_up,91,72,74,10,12 +face_right_eyebrow_outer,face_right_eyebrow_middle,104,41,43,15,13 +face_right_mouth_down,face_right_mouth_corner,118,80,82,12,14 +face_right_eyebrow_middle,face_right_eyebrow_inner,139,43,45,11,15 +face_right_eye_outer,face_right_eyebrow_outer,164,60,62,13,16 +face_right_jaw_up,face_right_jaw_middle,166,27,29,20,17 +face_right_eye_inner,face_right_eye_outer,167,63,65,16,18 +face_right_jaw_down,pose_nose,182,31,33,1,19 +face_right_jaw_middle,face_right_jaw_down,206,29,31,19,20 +face_left_mouth_up,pose_nose,301,76,78,1,21 +face_left_eyebrow_inner,pose_nose,319,50,48,26,22 +face_left_mouth_corner,face_left_mouth_up,321,78,80,21,23 +face_left_eyebrow_outer,face_left_eyebrow_middle,334,46,52,1,24 +face_left_mouth_down,face_left_mouth_corner,348,82,84,23,25 +face_left_eyebrow_middle,face_left_eyebrow_inner,368,48,50,24,26 +face_left_eye_outer,face_left_eyebrow_outer,393,69,71,22,27 +face_left_jaw_up,face_left_jaw_middle,395,37,39,31,28 +face_left_eye_inner,face_left_eye_outer,396,66,68,27,29 +face_left_jaw_down,pose_nose,411,33,35,1,30 +face_left_jaw_middle,face_left_jaw_down,431,35,37,30,31 +leftHand_thumb_cmc,pose_left_wrist,503,93,97,8,32 +leftHand_thumb_tip,leftHand_thumb_ip,506,96,100,32,33 +leftHand_index_finger_mcp,pose_left_wrist,507,97,101,8,34 +leftHand_index_finger_tip,leftHand_index_finger_dip,510,100,104,34,35 +leftHand_middle_finger_mcp,pose_left_wrist,511,101,105,8,36 +leftHand_middle_finger_tip,leftHand_middle_finger_dip,514,104,108,36,37 +leftHand_ring_finger_mcp,pose_left_wrist,515,105,109,8,38 +leftHand_ring_finger_tip,leftHand_ring_finger_dip,518,108,112,38,39 +leftHand_pinky_mcp,pose_left_wrist,519,109,113,8,40 +leftHand_pinky_tip,leftHand_pinky_dip,522,112,116,40,41 +rightHand_thumb_cmc,pose_right_wrist,524,114,118,9,42 +rightHand_thumb_tip,rightHand_thumb_ip,527,117,121,42,43 +rightHand_index_finger_mcp,pose_right_wrist,528,118,122,9,44 +rightHand_index_finger_tip,rightHand_index_finger_dip,531,121,125,44,45 +rightHand_middle_finger_mcp,pose_right_wrist,532,122,126,9,46 +rightHand_middle_finger_tip,rightHand_middle_finger_dip,535,125,129,46,47 +rightHand_ring_finger_mcp,pose_right_wrist,536,126,130,9,48 +rightHand_ring_finger_tip,rightHand_ring_finger_dip,539,129,133,48,49 +rightHand_pinky_mcp,pose_right_wrist,540,130,134,9,50 +rightHand_pinky_tip,rightHand_pinky_dip,543,133,137,50,51 diff --git a/SL-GCN/runModelTest.sh b/SL-GCN/runModelTest.sh new file mode 100644 index 0000000..ddef9e5 --- /dev/null +++ b/SL-GCN/runModelTest.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +#python main.py --config config/sign/train/train_joint.yaml + #num_class: 28 # AEC=28, PUCP=29 , WLASL=86 + #num_point: 29 # 29 or 71 + +declare -a points=(51 29 71 29 71 29 71) +declare -a lrs=(0.05 0.05 0.1 0.05 0.1 0.1 0.05) +declare -a datasets=("PUCP" "PUCP" "PUCP" "AEC" "AEC" "WLASL" "WLASL") + +# for j in 0 1 2 3 4 5 + +for i in 1 +do + for j in 0 + do + python main.py --seed $i 
--experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train cris_40points_1 + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train cris_40points_1 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train cris_40points_1 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + done +done \ No newline at end of file From 8a1d10bb4fc2d3dfdc3cb268a4f0a0be27de7e5f Mon Sep 17 00:00:00 2001 From: Cristian Lazo Quispe <31221056+CristianLazoQuispe@users.noreply.github.com> Date: Wed, 26 Oct 2022 23:22:56 -0500 Subject: [PATCH 34/56] 51 points works well --- config/sign/finetune/train_bone.yaml | 50 + config/sign/finetune/train_bone_motion.yaml | 50 + config/sign/finetune/train_joint.yaml | 50 + config/sign/finetune/train_joint_motion.yaml | 49 + config/sign/test/test_bone.yaml | 52 + config/sign/test/test_bone_motion.yaml | 52 + config/sign/test/test_joint.yaml | 51 + config/sign/test/test_joint_motion.yaml | 51 + config/sign/test_finetuned/test_bone.yaml | 52 + .../sign/test_finetuned/test_bone_motion.yaml | 52 + config/sign/test_finetuned/test_joint.yaml | 51 + .../test_finetuned/test_joint_motion.yaml | 51 + config/sign/train/train_bone.yaml | 49 + config/sign/train/train_bone_motion.yaml | 49 + config/sign/train/train_joint.yaml | 54 + config/sign/train/train_joint_motion.yaml | 49 + data_gen/__init__.py | 0 data_gen/gen_bone_data.py | 74 ++ data_gen/gen_motion_data.py | 33 + data_gen/getConnectingPoint.py | 147 +++ data_gen/sign_gendata.py | 98 ++ feeders/__init__.py | 2 + feeders/feeder.py | 249 ++++ feeders/tools.py | 161 +++ graph/__init__.py | 2 + graph/sign_27.py | 80 ++ graph/tools.py | 27 + main.py | 1053 +++++++++++++++++ model/__init__.py | 3 + model/decouple_gcn_attn.py | 284 +++++ model/dropSke.py | 36 + model/dropT.py | 24 + points_51.csv | 52 + runModel.sh | 32 + runModelTest.sh | 23 + 35 files changed, 3192 insertions(+) create mode 100644 config/sign/finetune/train_bone.yaml create mode 100644 config/sign/finetune/train_bone_motion.yaml create mode 100644 config/sign/finetune/train_joint.yaml create mode 100644 config/sign/finetune/train_joint_motion.yaml create mode 100644 config/sign/test/test_bone.yaml create mode 100644 config/sign/test/test_bone_motion.yaml create mode 100644 config/sign/test/test_joint.yaml create mode 100644 config/sign/test/test_joint_motion.yaml create mode 100644 config/sign/test_finetuned/test_bone.yaml create mode 100644 config/sign/test_finetuned/test_bone_motion.yaml create mode 100644 config/sign/test_finetuned/test_joint.yaml create mode 100644 config/sign/test_finetuned/test_joint_motion.yaml create mode 100644 config/sign/train/train_bone.yaml create mode 100644 config/sign/train/train_bone_motion.yaml create mode 100644 config/sign/train/train_joint.yaml create mode 100644 
config/sign/train/train_joint_motion.yaml create mode 100644 data_gen/__init__.py create mode 100644 data_gen/gen_bone_data.py create mode 100644 data_gen/gen_motion_data.py create mode 100644 data_gen/getConnectingPoint.py create mode 100644 data_gen/sign_gendata.py create mode 100644 feeders/__init__.py create mode 100644 feeders/feeder.py create mode 100644 feeders/tools.py create mode 100644 graph/__init__.py create mode 100644 graph/sign_27.py create mode 100644 graph/tools.py create mode 100644 main.py create mode 100644 model/__init__.py create mode 100644 model/decouple_gcn_attn.py create mode 100644 model/dropSke.py create mode 100644 model/dropT.py create mode 100644 points_51.csv create mode 100644 runModel.sh create mode 100644 runModelTest.sh diff --git a/config/sign/finetune/train_bone.yaml b/config/sign/finetune/train_bone.yaml new file mode 100644 index 0000000..ed0da3a --- /dev/null +++ b/config/sign/finetune/train_bone.yaml @@ -0,0 +1,50 @@ +Experiment_name: bone_27_2_finetune + +# feeder +feeder: feeders.feeder.Feeder +train_feeder_args: + data_path: ./data/sign/27_2/train_val_data_bone2.npy + label_path: ./data/sign/27_2/train_val_labels.pkl + debug: False + random_choose: True + random_shift: True + window_size: 100 + normalization: True + random_mirror: True + random_mirror_p: 0.5 + is_vector: True + +test_feeder_args: + data_path: ./data/sign/27_2/test_data_bone.npy + label_path: ./data/sign/27_2/test_labels_pseudo.pkl + random_mirror: False + normalization: True + is_vector: True + +# model +model: model.decouple_gcn_attn.Model +model_args: + num_class: 226 + num_point: 27 + num_person: 1 + graph: graph.sign_27.Graph + groups: 16 + block_size: 41 + graph_args: + labeling_mode: 'spatial' + +#optim +weight_decay: 0.0001 +base_lr: 0.01 +step: [50] + +# training +device: [0,1] +weights: final_models/27_2/bone_epoch_239_9470.pt +keep_rate: 0.9 +only_train_epoch: 1 +batch_size: 64 +test_batch_size: 64 +num_epoch: 100 +nesterov: True +warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/finetune/train_bone_motion.yaml b/config/sign/finetune/train_bone_motion.yaml new file mode 100644 index 0000000..9aba2df --- /dev/null +++ b/config/sign/finetune/train_bone_motion.yaml @@ -0,0 +1,50 @@ +Experiment_name: bone_motion_27_2_finetune + +# feeder +feeder: feeders.feeder.Feeder +train_feeder_args: + data_path: ./data/sign/27_2/train_val_data_bone2_motion.npy + label_path: ./data/sign/27_2/train_val_labels.pkl + debug: False + random_choose: True + random_shift: True + window_size: 100 + random_mirror: True + random_mirror_p: 0.5 + normalization: True + is_vector: True + +test_feeder_args: + data_path: ./data/sign/27_2/test_data_bone_motion.npy + label_path: ./data/sign/27_2/test_labels_pseudo.pkl + random_mirror: False + normalization: True + is_vector: True + +# model +model: model.decouple_gcn_attn.Model +model_args: + num_class: 226 + num_point: 27 + num_person: 1 + graph: graph.sign_27.Graph + groups: 16 + block_size: 41 + graph_args: + labeling_mode: 'spatial' + +#optim +weight_decay: 0.0001 +base_lr: 0.01 +step: [50] + +# training +device: [0,1] +weights: final_models/27_2/bone_motion_217_9249.pt +keep_rate: 0.9 +only_train_epoch: 1 +batch_size: 64 +test_batch_size: 64 +num_epoch: 100 +nesterov: True +warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/finetune/train_joint.yaml b/config/sign/finetune/train_joint.yaml new file mode 100644 index 0000000..0b3f480 --- /dev/null +++ b/config/sign/finetune/train_joint.yaml @@ -0,0 +1,50 @@ 
+Experiment_name: joint_27_2_finetune + +# feeder +feeder: feeders.feeder.Feeder +train_feeder_args: + data_path: ./data/sign/27_2/train_val_data_joint.npy + label_path: ./data/sign/27_2/train_val_labels.pkl + debug: False + random_choose: True + window_size: 100 + random_shift: True + normalization: True + random_mirror: True + random_mirror_p: 0.5 + is_vector: False + +test_feeder_args: + data_path: ./data/sign/27_2/test_data_joint.npy + label_path: ./data/sign/27_2/test_labels_pseudo.pkl + random_mirror: False + normalization: True + +# model +model: model.decouple_gcn_attn.Model +model_args: + num_class: 226 + num_point: 27 + num_person: 1 + graph: graph.sign_27.Graph + groups: 16 + block_size: 41 + graph_args: + labeling_mode: 'spatial' + +#optim +weight_decay: 0.0001 +base_lr: 0.01 +step: [50] + +# training +device: [0,1] +weights: final_models/27_2/joint_epoch_226_9468.pt +# start_epoch: 188 +keep_rate: 0.9 +only_train_epoch: 1 +batch_size: 64 +test_batch_size: 64 +num_epoch: 100 +nesterov: True +warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/finetune/train_joint_motion.yaml b/config/sign/finetune/train_joint_motion.yaml new file mode 100644 index 0000000..86382f2 --- /dev/null +++ b/config/sign/finetune/train_joint_motion.yaml @@ -0,0 +1,49 @@ +Experiment_name: joint_motion_27_2_finetune +# feeder +feeder: feeders.feeder.Feeder +train_feeder_args: + data_path: ./data/sign/27_2/train_val_data_joint_motion.npy + label_path: ./data/sign/27_2/train_val_labels.pkl + debug: False + random_choose: True + random_shift: True + window_size: 100 + random_mirror: True + random_mirror_p: 0.5 + normalization: True + is_vector: True + +test_feeder_args: + data_path: ./data/sign/27_2/test_data_joint_motion.npy + label_path: ./data/sign/27_2/test_labels_pseudo.pkl + random_mirror: False + normalization: True + is_vector: True + +# model +model: model.decouple_gcn_attn.Model +model_args: + num_class: 226 + num_point: 27 + num_person: 1 + graph: graph.sign_27.Graph + groups: 16 + block_size: 41 + graph_args: + labeling_mode: 'spatial' + +#optim +weight_decay: 0.0001 +base_lr: 0.01 +step: [50] + +# training +device: [0,1] +weights: final_models/27_2/joint_motion_248_9301.pt +keep_rate: 0.9 +only_train_epoch: 1 +batch_size: 64 +test_batch_size: 64 +num_epoch: 100 +nesterov: True +warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/test/test_bone.yaml b/config/sign/test/test_bone.yaml new file mode 100644 index 0000000..f97983a --- /dev/null +++ b/config/sign/test/test_bone.yaml @@ -0,0 +1,52 @@ +Experiment_name: bone_27_2_test + +# feeder +feeder: feeders.feeder.Feeder +train_feeder_args: + data_path: ./data/sign/27_2/train_data_bone.npy + label_path: ./data/sign/27_2/train_labels.pkl + debug: False + random_choose: True + random_shift: True + window_size: 100 + normalization: True + random_mirror: True + random_mirror_p: 0.5 + is_vector: True + +test_feeder_args: + data_path: ./data/sign/27_2/test_data_bone.npy + label_path: ./data/sign/27_2/test_labels_pseudo.pkl + random_mirror: False + normalization: True + is_vector: True + debug: False + +# model +model: model.decouple_gcn_attn.Model +model_args: + num_class: 226 + num_point: 27 + num_person: 1 + graph: graph.sign_27.Graph + groups: 16 + block_size: 41 + graph_args: + labeling_mode: 'spatial' + +#optim +weight_decay: 0.0001 +base_lr: 0.01 +step: [50] + +# training +device: [0,1] +phase: test +weights: final_models/27_2/bone_epoch_239_9470.pt +keep_rate: 0.9 +only_train_epoch: 1 +batch_size: 64 
+test_batch_size: 64 +num_epoch: 100 +nesterov: True +warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/test/test_bone_motion.yaml b/config/sign/test/test_bone_motion.yaml new file mode 100644 index 0000000..4a34f20 --- /dev/null +++ b/config/sign/test/test_bone_motion.yaml @@ -0,0 +1,52 @@ +Experiment_name: bone_motion_27_2_test + +# feeder +feeder: feeders.feeder.Feeder +train_feeder_args: + data_path: ./data/sign/27_2/train_data_bone_motion.npy + label_path: ./data/sign/27_2/train_labels.pkl + debug: False + random_choose: True + random_shift: True + window_size: 100 + random_mirror: True + random_mirror_p: 0.5 + normalization: True + is_vector: True + +test_feeder_args: + data_path: ./data/sign/27_2/test_data_bone_motion.npy + label_path: ./data/sign/27_2/test_labels_pseudo.pkl + random_mirror: False + normalization: True + is_vector: True + debug: False + +# model +model: model.decouple_gcn_attn.Model +model_args: + num_class: 226 + num_point: 27 + num_person: 1 + graph: graph.sign_27.Graph + groups: 16 + block_size: 41 + graph_args: + labeling_mode: 'spatial' + +#optim +weight_decay: 0.0001 +base_lr: 0.01 +step: [50] + +# training +device: [0,1] +phase: test +weights: final_models/27_2/bone_motion_217_9249.pt +keep_rate: 0.9 +only_train_epoch: 1 +batch_size: 64 +test_batch_size: 64 +num_epoch: 100 +nesterov: True +warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/test/test_joint.yaml b/config/sign/test/test_joint.yaml new file mode 100644 index 0000000..7a147bd --- /dev/null +++ b/config/sign/test/test_joint.yaml @@ -0,0 +1,51 @@ +Experiment_name: joint_27_2_test + +# feeder +feeder: feeders.feeder.Feeder +train_feeder_args: + data_path: ./data/sign/27_2/train_data_joint.npy + label_path: ./data/sign/27_2/train_labels.pkl + debug: False + random_choose: True + window_size: 100 + random_shift: True + normalization: True + random_mirror: True + random_mirror_p: 0.5 + is_vector: False + +test_feeder_args: + data_path: ./data/sign/27_2/test_data_joint.npy + label_path: ./data/sign/27_2/test_labels_pseudo.pkl + random_mirror: False + normalization: True + debug: False + +# model +model: model.decouple_gcn_attn.Model +model_args: + num_class: 226 + num_point: 27 + num_person: 1 + graph: graph.sign_27.Graph + groups: 16 + block_size: 41 + graph_args: + labeling_mode: 'spatial' + +#optim +weight_decay: 0.0001 +base_lr: 0.01 +step: [50] + +# training +device: [0,1] +phase: test +weights: final_models/27_2/joint_epoch_226_9468.pt +keep_rate: 0.9 +only_train_epoch: 1 +batch_size: 64 +test_batch_size: 64 +num_epoch: 100 +nesterov: True +warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/test/test_joint_motion.yaml b/config/sign/test/test_joint_motion.yaml new file mode 100644 index 0000000..8419ddd --- /dev/null +++ b/config/sign/test/test_joint_motion.yaml @@ -0,0 +1,51 @@ +Experiment_name: joint_motion_27_2_test +# feeder +feeder: feeders.feeder.Feeder +train_feeder_args: + data_path: ./data/sign/27_2/train_data_joint_motion.npy + label_path: ./data/sign/27_2/train_labels.pkl + debug: False + random_choose: True + random_shift: True + window_size: 100 + random_mirror: True + random_mirror_p: 0.5 + normalization: True + is_vector: True + +test_feeder_args: + data_path: ./data/sign/27_2/test_data_joint_motion.npy + label_path: ./data/sign/27_2/test_labels_pseudo.pkl + random_mirror: False + normalization: True + is_vector: True + debug: False + +# model +model: model.decouple_gcn_attn.Model +model_args: + num_class: 226 + num_point: 27 + 
num_person: 1 + graph: graph.sign_27.Graph + groups: 16 + block_size: 41 + graph_args: + labeling_mode: 'spatial' + +#optim +weight_decay: 0.0001 +base_lr: 0.01 +step: [50] + +# training +device: [0,1] +phase: test +weights: final_models/27_2/joint_motion_248_9301.pt +keep_rate: 0.9 +only_train_epoch: 1 +batch_size: 64 +test_batch_size: 64 +num_epoch: 100 +nesterov: True +warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/test_finetuned/test_bone.yaml b/config/sign/test_finetuned/test_bone.yaml new file mode 100644 index 0000000..9561ac8 --- /dev/null +++ b/config/sign/test_finetuned/test_bone.yaml @@ -0,0 +1,52 @@ +Experiment_name: bone_27_2_finetune_test + +# feeder +feeder: feeders.feeder.Feeder +train_feeder_args: + data_path: ./data/sign/27_2/train_val_data_bone.npy + label_path: ./data/sign/27_2/train_val_labels.pkl + debug: False + random_choose: True + random_shift: True + window_size: 100 + normalization: True + random_mirror: True + random_mirror_p: 0.5 + is_vector: True + +test_feeder_args: + data_path: ./data/sign/27_2/test_data_bone.npy + label_path: ./data/sign/27_2/test_labels_pseudo.pkl + random_mirror: False + normalization: True + is_vector: True + debug: False + +# model +model: model.decouple_gcn_attn.Model +model_args: + num_class: 226 + num_point: 27 + num_person: 1 + graph: graph.sign_27.Graph + groups: 16 + block_size: 41 + graph_args: + labeling_mode: 'spatial' + +#optim +weight_decay: 0.0001 +base_lr: 0.01 +step: [50] + +# training +device: [0,1] +phase: test +weights: final_models/27_2_finetuned/bone_finetuned.pt +keep_rate: 0.9 +only_train_epoch: 1 +batch_size: 64 +test_batch_size: 64 +num_epoch: 100 +nesterov: True +warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/test_finetuned/test_bone_motion.yaml b/config/sign/test_finetuned/test_bone_motion.yaml new file mode 100644 index 0000000..5ed26ad --- /dev/null +++ b/config/sign/test_finetuned/test_bone_motion.yaml @@ -0,0 +1,52 @@ +Experiment_name: bone_motion_27_2_finetune_test + +# feeder +feeder: feeders.feeder.Feeder +train_feeder_args: + data_path: ./data/sign/27_2/train_val_data_bone_motion.npy + label_path: ./data/sign/27_2/train_val_labels.pkl + debug: False + random_choose: True + random_shift: True + window_size: 100 + random_mirror: True + random_mirror_p: 0.5 + normalization: True + is_vector: True + +test_feeder_args: + data_path: ./data/sign/27_2/test_data_bone_motion.npy + label_path: ./data/sign/27_2/test_labels_pseudo.pkl + random_mirror: False + normalization: True + is_vector: True + debug: False + +# model +model: model.decouple_gcn_attn.Model +model_args: + num_class: 226 + num_point: 27 + num_person: 1 + graph: graph.sign_27.Graph + groups: 16 + block_size: 41 + graph_args: + labeling_mode: 'spatial' + +#optim +weight_decay: 0.0001 +base_lr: 0.01 +step: [50] + +# training +device: [0,1] +phase: test +weights: final_models/27_2_finetuned/bone_motion_finetuned.pt +keep_rate: 0.9 +only_train_epoch: 1 +batch_size: 64 +test_batch_size: 64 +num_epoch: 100 +nesterov: True +warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/test_finetuned/test_joint.yaml b/config/sign/test_finetuned/test_joint.yaml new file mode 100644 index 0000000..b9e9afe --- /dev/null +++ b/config/sign/test_finetuned/test_joint.yaml @@ -0,0 +1,51 @@ +Experiment_name: joint_27_2_finetune_test + +# feeder +feeder: feeders.feeder.Feeder +train_feeder_args: + data_path: ./data/sign/27_2/train_val_data_joint.npy + label_path: ./data/sign/27_2/train_val_labels.pkl + debug: 
False + random_choose: True + window_size: 100 + random_shift: True + normalization: True + random_mirror: True + random_mirror_p: 0.5 + is_vector: False + +test_feeder_args: + data_path: ./data/sign/27_2/test_data_joint.npy + label_path: ./data/sign/27_2/test_labels_pseudo.pkl + random_mirror: False + normalization: True + debug: False + +# model +model: model.decouple_gcn_attn.Model +model_args: + num_class: 226 + num_point: 27 + num_person: 1 + graph: graph.sign_27.Graph + groups: 16 + block_size: 41 + graph_args: + labeling_mode: 'spatial' + +#optim +weight_decay: 0.0001 +base_lr: 0.01 +step: [50] + +# training +device: [0,1] +phase: test +weights: final_models/27_2_finetuned/joint_finetuned.pt +keep_rate: 0.9 +only_train_epoch: 1 +batch_size: 64 +test_batch_size: 64 +num_epoch: 100 +nesterov: True +warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/test_finetuned/test_joint_motion.yaml b/config/sign/test_finetuned/test_joint_motion.yaml new file mode 100644 index 0000000..3ab3fec --- /dev/null +++ b/config/sign/test_finetuned/test_joint_motion.yaml @@ -0,0 +1,51 @@ +Experiment_name: joint_motion_27_2_finetune_test +# feeder +feeder: feeders.feeder.Feeder +train_feeder_args: + data_path: ./data/sign/27_2/train_val_data_joint_motion.npy + label_path: ./data/sign/27_2/train_val_labels.pkl + debug: False + random_choose: True + random_shift: True + window_size: 100 + random_mirror: True + random_mirror_p: 0.5 + normalization: True + is_vector: True + +test_feeder_args: + data_path: ./data/sign/27_2/test_data_joint_motion.npy + label_path: ./data/sign/27_2/test_labels_pseudo.pkl + random_mirror: False + normalization: True + is_vector: True + debug: False + +# model +model: model.decouple_gcn_attn.Model +model_args: + num_class: 226 + num_point: 27 + num_person: 1 + graph: graph.sign_27.Graph + groups: 16 + block_size: 41 + graph_args: + labeling_mode: 'spatial' + +#optim +weight_decay: 0.0001 +base_lr: 0.01 +step: [50] + +# training +device: [0,1] +phase: test +weights: final_models/27_2_finetuned/joint_motion_finetuned.pt +keep_rate: 0.9 +only_train_epoch: 1 +batch_size: 64 +test_batch_size: 64 +num_epoch: 100 +nesterov: True +warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/train/train_bone.yaml b/config/sign/train/train_bone.yaml new file mode 100644 index 0000000..f1cff0d --- /dev/null +++ b/config/sign/train/train_bone.yaml @@ -0,0 +1,49 @@ +Experiment_name: sign_bone_final + +# feeder +feeder: feeders.feeder.Feeder +train_feeder_args: + data_path: ./data/sign/27_2/train_data_bone.npy + label_path: ./data/sign/27_2/train_label.pkl + debug: False + random_choose: True + random_shift: True + window_size: 100 + normalization: True + random_mirror: True + random_mirror_p: 0.5 + is_vector: True + +test_feeder_args: + data_path: ./data/sign/27_2/val_data_bone.npy + label_path: ./data/sign/27_2/val_gt.pkl + random_mirror: False + normalization: True + is_vector: True + +# model +model: model.decouple_gcn_attn.Model +model_args: + num_class: 226 + num_point: 27 + num_person: 1 + graph: graph.sign_27.Graph + groups: 16 + block_size: 41 + graph_args: + labeling_mode: 'spatial' + +#optim +weight_decay: 0.0001 +base_lr: 0.1 +step: [150, 200] + +# training +device: [4,5,6,7] +keep_rate: 0.9 +only_train_epoch: 1 +batch_size: 64 +test_batch_size: 64 +num_epoch: 250 +nesterov: True +warm_up_epoch: 20 \ No newline at end of file diff --git a/config/sign/train/train_bone_motion.yaml b/config/sign/train/train_bone_motion.yaml new file mode 100644 index 
0000000..83c912b --- /dev/null +++ b/config/sign/train/train_bone_motion.yaml @@ -0,0 +1,49 @@ +Experiment_name: sign_bone_motion_final + +# feeder +feeder: feeders.feeder.Feeder +train_feeder_args: + data_path: ./data/sign/27_2/train_data_bone_motion.npy + label_path: ./data/sign/27_2/train_label.pkl + debug: False + random_choose: True + random_shift: True + window_size: 100 + random_mirror: True + random_mirror_p: 0.5 + normalization: True + is_vector: True + +test_feeder_args: + data_path: ./data/sign/27_2/val_data_bone_motion.npy + label_path: ./data/sign/27_2/val_gt.pkl + random_mirror: False + normalization: True + is_vector: True + +# model +model: model.decouple_gcn_attn.Model +model_args: + num_class: 226 + num_point: 27 + num_person: 1 + graph: graph.sign_27.Graph + groups: 16 + block_size: 41 + graph_args: + labeling_mode: 'spatial' + +#optim +weight_decay: 0.0001 +base_lr: 0.1 +step: [150, 200] + +# training +device: [4,5,6,7] +keep_rate: 0.9 +only_train_epoch: 1 +batch_size: 64 +test_batch_size: 64 +num_epoch: 250 +nesterov: True +warm_up_epoch: 20 \ No newline at end of file diff --git a/config/sign/train/train_joint.yaml b/config/sign/train/train_joint.yaml new file mode 100644 index 0000000..6049fe1 --- /dev/null +++ b/config/sign/train/train_joint.yaml @@ -0,0 +1,54 @@ +#Experiment_name: sign_joint_final + +# feeder +feeder: feeders.feeder.Feeder +train_feeder_args: + data_path: data/sign/1/train_data_joint.npy + label_path: data/sign/1/train_label.pkl + meaning_path: data/sign/1/meaning.pkl + debug: False + random_choose: True + window_size: 100 + random_shift: True + normalization: True + random_mirror: True + random_mirror_p: 0.5 + is_vector: False + +test_feeder_args: + data_path: data/sign/1/val_data_joint.npy + label_path: data/sign/1/val_label.pkl + meaning_path: data/sign/1/meaning.pkl + random_mirror: False + normalization: True + +# model +# 226 (num classes) +model: model.decouple_gcn_attn.Model +model_args: + #num_class: 28 # AEC=28, PUCP=36 , WLASL=101 + #num_point: 29 # 29 or 71 + num_person: 1 + graph: graph.sign_27.Graph + groups: 16 + block_size: 41 + graph_args: + labeling_mode: 'spatial' + num_node: 29 + +#optim +weight_decay: 0.0001 +base_lr: 0.1 +step: [150, 200] + +# training +#device: [0, 1,2,3 ] + +device: [0] +keep_rate: 0.9 +only_train_epoch: 1 +batch_size: 8 +test_batch_size: 8 +num_epoch: 250 +nesterov: True +warm_up_epoch: 20 diff --git a/config/sign/train/train_joint_motion.yaml b/config/sign/train/train_joint_motion.yaml new file mode 100644 index 0000000..5ef0f5a --- /dev/null +++ b/config/sign/train/train_joint_motion.yaml @@ -0,0 +1,49 @@ +Experiment_name: sign_joint_motion_final + +# feeder +feeder: feeders.feeder.Feeder +train_feeder_args: + data_path: ./data/sign/27_2/train_data_joint_motion.npy + label_path: ./data/sign/27_2/train_label.pkl + debug: False + random_choose: True + random_shift: True + window_size: 100 + random_mirror: True + random_mirror_p: 0.5 + normalization: True + is_vector: True + +test_feeder_args: + data_path: ./data/sign/27_2/val_data_joint_motion.npy + label_path: ./data/sign/27_2/val_gt.pkl + random_mirror: False + normalization: True + is_vector: True + +# model +model: model.decouple_gcn_attn.Model +model_args: + num_class: 226 + num_point: 27 + num_person: 1 + graph: graph.sign_27.Graph + groups: 16 + block_size: 41 + graph_args: + labeling_mode: 'spatial' + +#optim +weight_decay: 0.0001 +base_lr: 0.1 +step: [150, 200] + +# training +device: [0,1,2,3] +keep_rate: 0.9 +only_train_epoch: 1 +batch_size: 64 
+test_batch_size: 64 +num_epoch: 250 +nesterov: True +warm_up_epoch: 20 \ No newline at end of file diff --git a/data_gen/__init__.py b/data_gen/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/data_gen/gen_bone_data.py b/data_gen/gen_bone_data.py new file mode 100644 index 0000000..dbe36cb --- /dev/null +++ b/data_gen/gen_bone_data.py @@ -0,0 +1,74 @@ +import os +import numpy as np +from numpy.lib.format import open_memmap + +paris = { + 'ntu/xview': ( + (1, 2), (2, 21), (3, 21), (4, 3), (5, 21), (6, 5), (7, 6), (8, 7), (9, 21), (10, 9), (11, 10), (12, 11), + (13, 1), + (14, 13), (15, 14), (16, 15), (17, 1), (18, 17), (19, 18), (20, 19), (22, 23), (21, 21), (23, 8), (24, 25), + (25, 12) + ), + 'ntu/xsub': ( + (1, 2), (2, 21), (3, 21), (4, 3), (5, 21), (6, 5), (7, 6), (8, 7), (9, 21), (10, 9), (11, 10), (12, 11), + (13, 1), + (14, 13), (15, 14), (16, 15), (17, 1), (18, 17), (19, 18), (20, 19), (22, 23), (21, 21), (23, 8), (24, 25), + (25, 12) + ), + 'ntu120/xsetup': ( + (1, 2), (2, 21), (3, 21), (4, 3), (5, 21), (6, 5), (7, 6), (8, 7), (9, 21), (10, 9), (11, 10), (12, 11), + (13, 1), + (14, 13), (15, 14), (16, 15), (17, 1), (18, 17), (19, 18), (20, 19), (22, 23), (21, 21), (23, 8), (24, 25), + (25, 12) + ), + 'ntu120/xsub': ( + (1, 2), (2, 21), (3, 21), (4, 3), (5, 21), (6, 5), (7, 6), (8, 7), (9, 21), (10, 9), (11, 10), (12, 11), + (13, 1), + (14, 13), (15, 14), (16, 15), (17, 1), (18, 17), (19, 18), (20, 19), (22, 23), (21, 21), (23, 8), (24, 25), + (25, 12) + ), + 'sign/27': ((5, 6), (5, 7), + (6, 8), (8, 10), (7, 9), (9, 11), + (12,13),(12,14),(12,16),(12,18),(12,20), + (14,15),(16,17),(18,19),(20,21), + (22,23),(22,24),(22,26),(22,28),(22,30), + (24,25),(26,27),(28,29),(30,31), + (10,12),(11,22) + ), + 'sign/27_2': ((5, 6), (5, 7), + (6, 8), (8, 10), (7, 9), (9, 11), + (12,13),(12,14),(12,16),(12,18),(12,20), + (14,15),(16,17),(18,19),(20,21), + (22,23),(22,24),(22,26),(22,28),(22,30), + (24,25),(26,27),(28,29),(30,31), + (10,12),(11,22) + ) +} + +sets = { + 'train', 'val', 'test' +} + +datasets = { + 'sign/27_2' +} + +from tqdm import tqdm + +for dataset in datasets: + for set in sets: + print(dataset, set) + data = np.load('../data/{}/{}_data_joint.npy'.format(dataset, set)) + N, C, T, V, M = data.shape + fp_sp = open_memmap( + '../data/{}/{}_data_bone.npy'.format(dataset, set), + dtype='float32', + mode='w+', + shape=(N, 3, T, V, M)) + + fp_sp[:, :C, :, :, :] = data + for v1, v2 in tqdm(paris[dataset]): + v1 -= 5 + v2 -= 5 + fp_sp[:, :, :, v2, :] = data[:, :, :, v2, :] - data[:, :, :, v1, :] + # fp_sp[:, :, :, v1, :] = data[:, :, :, v1, :] - data[:, :, :, v2, :] diff --git a/data_gen/gen_motion_data.py b/data_gen/gen_motion_data.py new file mode 100644 index 0000000..dc3109a --- /dev/null +++ b/data_gen/gen_motion_data.py @@ -0,0 +1,33 @@ +import os +import numpy as np +from numpy.lib.format import open_memmap + +sets = { + 'train', 'val', 'test' + +} + +datasets = { + 'sign/27_2' +} + +parts = { + 'joint', 'bone' +} +from tqdm import tqdm + +for dataset in datasets: + for set in sets: + for part in parts: + print(dataset, set, part) + data = np.load('../data/{}/{}_data_{}.npy'.format(dataset, set, part)) + N, C, T, V, M = data.shape + print(data.shape) + fp_sp = open_memmap( + '../data/{}/{}_data_{}_motion.npy'.format(dataset, set, part), + dtype='float32', + mode='w+', + shape=(N, C, T, V, M)) + for t in tqdm(range(T - 1)): + fp_sp[:, :, t, :, :] = data[:, :, t + 1, :, :] - data[:, :, t, :, :] + fp_sp[:, :, T - 1, :, :] = 0 diff --git 
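Side note on the two generators above: gen_bone_data.py writes, for every pair in paris['sign/27_2'] (1-based ids shifted down by 5), the difference between the second joint of the pair and the first, and gen_motion_data.py writes frame-to-frame differences with the last frame zeroed. A minimal in-memory sketch of both transforms is below; the toy shapes and edge list are hypothetical, and the real scripts stream the result through numpy's open_memmap instead of holding it in RAM.

import numpy as np

# Toy joint tensor: N samples, C channels, T frames, V joints, M persons.
N, C, T, V, M = 4, 2, 150, 27, 1
joint = np.random.rand(N, C, T, V, M).astype(np.float32)

# Bone stream: for each (v1, v2) pair, the slot of v2 holds joint[v2] - joint[v1].
# The edges here are 0-indexed toy pairs standing in for paris['sign/27_2'].
edges = [(0, 1), (0, 2), (1, 3)]
bone = np.zeros_like(joint)
for v1, v2 in edges:
    bone[:, :, :, v2, :] = joint[:, :, :, v2, :] - joint[:, :, :, v1, :]

# Motion stream: temporal difference, with the final frame set to zero.
motion = np.zeros_like(joint)
motion[:, :, :-1, :, :] = joint[:, :, 1:, :, :] - joint[:, :, :-1, :, :]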
a/data_gen/getConnectingPoint.py b/data_gen/getConnectingPoint.py new file mode 100644 index 0000000..cd37a7b --- /dev/null +++ b/data_gen/getConnectingPoint.py @@ -0,0 +1,147 @@ +import pickle +import sys +import numpy as np +import pandas as pd +import os +import h5py +import pandas as pd +sys.path.extend(['../']) + +max_body_true = 1 +max_frame = 150 +num_channels = 2 + +# These three def return an index value less 1 because it array count starts at 1 +def get_mp_keys(points): + tar = np.array(points.mp_pos)-1 + return list(tar) + +def get_op_keys(points): + tar = np.array(points.op_pos)-1 + return list(tar) + +def get_wp_keys(points): + tar = np.array(points.wb_pos)-1 + return list(tar) + +def read_data(path, model_key_getter, config): + data = [] + classes = [] + videoName = [] + + if 'AEC' in path: + list_labels_banned = ["ya", "qué?", "qué", "bien", "dos", "ahí", "luego", "yo", "él", "tú","???","NNN"] + + if 'PUCP' in path: + list_labels_banned = ["ya", "qué?", "qué", "bien", "dos", "ahí", "luego", "yo", "él", "tú","???","NNN"] + list_labels_banned += ["sí","ella","uno","ese","ah","dijo","llamar"] + + if 'WLASL' in path: + list_labels_banned = ['apple','computer','fish','kiss','later','no','orange','pizza','purple','secretary','shirt','sunday','take','water','yellow'] + + + with h5py.File(path, "r") as f: + for index in f.keys(): + label = f[index]['label'][...].item().decode('utf-8') + + if str(label) in list_labels_banned: + continue + + classes.append(label) + videoName.append(f[index]['video_name'][...].item().decode('utf-8')) + data.append(f[index]["data"][...]) + + print('config : ',config) + points = pd.read_csv(f"points_{config}.csv") + + tar = model_key_getter(points) + print('tart',tar) + + data = [d[:,:,tar] for d in data] + + meaning = {v:k for (k,v) in enumerate(sorted(set(classes)))} + + retrive_meaning = {k:v for (k,v) in enumerate(sorted(set(classes)))} + + labels = [meaning[label] for label in classes] + + print('meaning',meaning) + print('retrive_meaning',retrive_meaning) + + return labels, videoName, data, retrive_meaning + + +def gendata(data_path, out_path, model_key_getter, part='train', config=1): + + data=[] + sample_names = [] + + labels, sample_names, data , retrive_meaning = read_data(data_path, model_key_getter,config) + fp = np.zeros((len(labels), max_frame, config, num_channels, max_body_true), dtype=np.float32) + + for i, skel in enumerate(data): + + skel = np.array(skel) + skel = np.moveaxis(skel,1,2) + skel = skel # *256 + + if skel.shape[0] < max_frame: + L = skel.shape[0] + + fp[i,:L,:,:,0] = skel + + rest = max_frame - L + num = int(np.ceil(rest / L)) + pad = np.concatenate([skel for _ in range(num)], 0)[:rest] + fp[i,L:,:,:,0] = pad + + else: + L = skel.shape[0] + + fp[i,:,:,:,0] = skel[:max_frame,:,:] + + + with open('{}/{}_label.pkl'.format(out_path, part), 'wb') as f: + pickle.dump((sample_names, labels), f) + + fp = np.transpose(fp, [0, 3, 1, 2, 4]) + print(fp.shape) + np.save('{}/{}_data_joint.npy'.format(out_path, part), fp) + + with open('{}/meaning.pkl'.format(out_path), 'wb') as f: + pickle.dump(retrive_meaning, f) + + + + +if __name__ == '__main__': + + folderName= '1' # just used to create folder "1" in data/sign/1/ + out_folder='../data/sign/' + out_path = os.path.join(out_folder, folderName) + + kp_model = 'wholepose' # openpose wholepose mediapipe + dataset = "WLASL" # WLASL PUCP_PSL_DGI156 AEC + numPoints = 29 # number of points used, need to be: 29 or 71 + + model_key_getter = {'mediapipe': get_mp_keys, + 'openpose': get_op_keys, 
+ 'wholepose': get_wp_keys} + + if not os.path.exists(out_path): + os.makedirs(out_path) + + + print('\n',kp_model, dataset,'\n') + + part = "train" + print(out_path,'->', part) + data_path = f'../../../../joe/ConnectingPoints/split/{dataset}--{kp_model}-Train.hdf5' + gendata(data_path, out_path, model_key_getter[kp_model], part=part, config=numPoints) + + + part = "val" + print(out_path,'->', part) + data_path = f'../../../ConnectingPoints/split/{dataset}--{kp_model}-Val.hdf5' + + gendata(data_path, out_path, model_key_getter[kp_model], part=part, config=numPoints) diff --git a/data_gen/sign_gendata.py b/data_gen/sign_gendata.py new file mode 100644 index 0000000..f5b4cf1 --- /dev/null +++ b/data_gen/sign_gendata.py @@ -0,0 +1,98 @@ +import argparse +import pickle +from tqdm import tqdm +import sys +import numpy as np +import os + +sys.path.extend(['../']) + +selected_joints = { + '59': np.concatenate((np.arange(0,17), np.arange(91,133)), axis=0), #59 + '31': np.concatenate((np.arange(0,11), [91,95,96,99,100,103,104,107,108,111],[112,116,117,120,121,124,125,128,129,132]), axis=0), #31 + '27': np.concatenate(([0,5,6,7,8,9,10], + [91,95,96,99,100,103,104,107,108,111],[112,116,117,120,121,124,125,128,129,132]), axis=0) #27 +} + +max_body_true = 1 +max_frame = 150 +num_channels = 3 + + + +def gendata(data_path, label_path, out_path, part='train', config='27'): + labels = [] + data=[] + sample_names = [] + selected = selected_joints[config] + num_joints = len(selected) + label_file = open(label_path, 'r', encoding='utf-8') + + + for line in label_file.readlines(): + line = line.strip() + line = line.split(',') + + sample_names.append(line[0]) + data.append(os.path.join(data_path, line[0] + '_color.mp4.npy')) + # print(line[1]) + labels.append(int(line[1])) + # print(labels[-1]) + + fp = np.zeros((len(data), max_frame, num_joints, num_channels, max_body_true), dtype=np.float32) + + for i, data_path in enumerate(data): + + # print(sample_names[i]) + skel = np.load(data_path) + skel = skel[:,selected,:] + + if skel.shape[0] < max_frame: + L = skel.shape[0] + print(L) + fp[i,:L,:,:,0] = skel + + rest = max_frame - L + num = int(np.ceil(rest / L)) + pad = np.concatenate([skel for _ in range(num)], 0)[:rest] + fp[i,L:,:,:,0] = pad + + else: + L = skel.shape[0] + print(L) + fp[i,:,:,:,0] = skel[:max_frame,:,:] + + + with open('{}/{}_label.pkl'.format(out_path, part), 'wb') as f: + pickle.dump((sample_names, labels), f) + + fp = np.transpose(fp, [0, 3, 1, 2, 4]) + print(fp.shape) + np.save('{}/{}_data_joint.npy'.format(out_path, part), fp) + + + + + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Sign Data Converter.') + parser.add_argument('--data_path', default='/data/sign/test_npy/npy') #'train_npy/npy', 'va_npy/npy' + parser.add_argument('--label_path', default='../data/sign/27/train_labels.csv') # 'train_labels.csv', 'val_gt.csv', 'test_labels.csv' + parser.add_argument('--out_folder', default='../data/sign/') + parser.add_argument('--points', default='27') + + part = 'test' # 'train', 'val' + arg = parser.parse_args() + + out_path = os.path.join(arg.out_folder, arg.points) + print(out_path) + if not os.path.exists(out_path): + os.makedirs(out_path) + + gendata( + arg.data_path, + arg.label_path, + out_path, + part=part, + config=arg.points) diff --git a/feeders/__init__.py b/feeders/__init__.py new file mode 100644 index 0000000..7eb0066 --- /dev/null +++ b/feeders/__init__.py @@ -0,0 +1,2 @@ +from . import tools +from . 
import feeder \ No newline at end of file diff --git a/feeders/feeder.py b/feeders/feeder.py new file mode 100644 index 0000000..bc3f9c6 --- /dev/null +++ b/feeders/feeder.py @@ -0,0 +1,249 @@ +import numpy as np +import pickle +import torch +from torch.utils.data import Dataset +import sys +import random +sys.path.extend(['../']) +from feeders import tools + +# flip_index for 71 and 29 +flip_index = {71:np.concatenate(([0,2,1,4,3,6,5,8,7,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30],[31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],[51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70]), axis=0), + 51:np.concatenate(([0,2,1,4,3,6,5,8,7,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30],[31,32,33,34,35,36,37,38,39,40],[41,42,43,44,45,46,47,48,49,50]), axis=0), + 29:np.concatenate(([0,2,1,4,3,6,5,8,7],[9,10,11,12,13,14,15,16,17,18],[19,20,21,22,23,24,25,26,27,28]), axis=0)} + +class Feeder(Dataset): + def __init__(self, data_path, label_path, meaning_path, + random_choose=False, random_shift=False, random_move=False, + window_size=-1, normalization=False, debug=False, use_mmap=True, random_mirror=False, random_mirror_p=0.5, is_vector=False): + + """ + :param data_path: + :param label_path: + :param random_choose: If true, randomly choose a portion of the input sequence + :param random_shift: If true, randomly pad zeros at the begining or end of sequence + :param random_move: + :param window_size: The length of the output sequence + :param normalization: If true, normalize input sequence + :param debug: If true, only use the first 100 samples + :param use_mmap: If true, use mmap mode to load data, which can save the running memory + """ + + self.debug = debug + self.data_path = data_path + self.label_path = label_path + self.meaning_path = meaning_path + self.random_choose = random_choose + self.random_shift = random_shift + self.random_move = random_move + self.window_size = window_size + self.normalization = normalization + self.use_mmap = use_mmap + self.random_mirror = random_mirror + self.random_mirror_p = random_mirror_p + self.load_data() + self.is_vector = is_vector + if normalization: + self.get_mean_map() + + def load_data(self): + # data: N C V T M + + try: + with open(self.label_path) as f: + self.sample_name, self.label = pickle.load(f) + except: + # for pickle file from python2 + with open(self.label_path, 'rb') as f: + self.sample_name, self.label = pickle.load(f, encoding='latin1') + + # load data + if self.use_mmap: + self.data = np.load(self.data_path, mmap_mode='r') + else: + self.data = np.load(self.data_path) + if self.debug: + self.label = self.label[0:100] + self.data = self.data[0:100] + self.sample_name = self.sample_name[0:100] + try: + with open(self.meaning_path) as f: + self.meaning = pickle.load(f) + except: + # for pickle file from python2 + with open(self.meaning_path, 'rb') as f: + self.meaning = pickle.load(f, encoding='latin1') + + + def get_mean_map(self): + data = self.data + N, C, T, V, M = data.shape + self.mean_map = data.mean(axis=2, keepdims=True).mean(axis=4, keepdims=True).mean(axis=0) + self.std_map = data.transpose((0, 2, 4, 1, 3)).reshape((N * T * M, C * V)).std(axis=0).reshape((C, 1, V, 1)) + + def __len__(self): + return len(self.label) + + def __iter__(self): + return self + + def __getitem__(self, index): + data_numpy = self.data[index] + label = self.label[index] + name = self.sample_name[index] + data_numpy = np.array(data_numpy) + + if self.random_choose: + data_numpy = 
tools.random_choose(data_numpy, self.window_size) + + if self.random_mirror: + if random.random() > self.random_mirror_p: + #print("dabe before random mirror", data_numpy) + assert data_numpy.shape[2] == 71 or data_numpy.shape[2] == 29 or data_numpy.shape[2] == 51 + data_numpy = data_numpy[:,:,flip_index[data_numpy.shape[2]],:] + if self.is_vector: + data_numpy[0,:,:,:] = - data_numpy[0,:,:,:] + else: + data_numpy[0,:,:,:] = 1 - data_numpy[0,:,:,:] + #print("dabe after random mirror", data_numpy) + + if self.normalization: + # data_numpy = (data_numpy - self.mean_map) / self.std_map + assert data_numpy.shape[0] == 2 + #print("dabe before norm", data_numpy) + if self.is_vector: + data_numpy[0,:,0,:] = data_numpy[0,:,0,:] - data_numpy[0,:,0,0].mean(axis=0) + data_numpy[1,:,0,:] = data_numpy[1,:,0,:] - data_numpy[1,:,0,0].mean(axis=0) + else: + data_numpy[0,:,:,:] = data_numpy[0,:,:,:] - data_numpy[0,:,0,0].mean(axis=0) + data_numpy[1,:,:,:] = data_numpy[1,:,:,:] - data_numpy[1,:,0,0].mean(axis=0) + #print("dabe after norm", data_numpy) + if self.random_shift: + + #print("dabe before shift", data_numpy) + if self.is_vector: + data_numpy[0,:,0,:] += random.random() * 20 - 10.0 + data_numpy[1,:,0,:] += random.random() * 20 - 10.0 + else: + data_numpy[0,:,:,:] += random.random()/25 #random.random() * 20 - 10.0 + data_numpy[1,:,:,:] += random.random()/25 #random.random() * 20 - 10.0 + #print("dabe after shift", data_numpy) + + # if self.random_shift: + # data_numpy = tools.random_shift(data_numpy) + + # elif self.window_size > 0: + # data_numpy = tools.auto_pading(data_numpy, self.window_size) + if self.random_move: + data_numpy = tools.random_move(data_numpy) + + return data_numpy, label, index, name + + def top_k(self, score, top_k): + rank = score.argsort() + hit_top_k = [l in rank[i, -top_k:] for i, l in enumerate(self.label)] + return sum(hit_top_k) * 1.0 / len(hit_top_k) + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + + +def test(data_path, label_path, vid=None, graph=None, is_3d=False): + ''' + vis the samples using matplotlib + :param data_path: + :param label_path: + :param vid: the id of sample + :param graph: + :param is_3d: when vis NTU, set it True + :return: + ''' + import matplotlib.pyplot as plt + loader = torch.utils.data.DataLoader( + dataset=Feeder(data_path, label_path), + batch_size=64, + shuffle=False, + num_workers=2) + + if vid is not None: + sample_name = loader.dataset.sample_name + sample_id = [name.split('.')[0] for name in sample_name] + index = sample_id.index(vid) + data, label, index = loader.dataset[index] + data = data.reshape((1,) + data.shape) + + # for batch_idx, (data, label) in enumerate(loader): + N, C, T, V, M = data.shape + + plt.ion() + fig = plt.figure() + if is_3d: + from mpl_toolkits.mplot3d import Axes3D + ax = fig.add_subplot(111, projection='3d') + else: + ax = fig.add_subplot(111) + + if graph is None: + p_type = ['b.', 'g.', 'r.', 'c.', 'm.', 'y.', 'k.', 'k.', 'k.', 'k.'] + pose = [ + ax.plot(np.zeros(V), np.zeros(V), p_type[m])[0] for m in range(M) + ] + ax.axis([-1, 1, -1, 1]) + for t in range(T): + for m in range(M): + pose[m].set_xdata(data[0, 0, t, :, m]) + pose[m].set_ydata(data[0, 1, t, :, m]) + fig.canvas.draw() + plt.pause(0.001) + else: + p_type = ['b-', 'g-', 'r-', 'c-', 'm-', 'y-', 'k-', 'k-', 'k-', 'k-'] + import sys + from os import path + sys.path.append( + 
path.dirname(path.dirname(path.dirname(path.abspath(__file__))))) + G = import_class(graph)() + edge = G.inward + pose = [] + for m in range(M): + a = [] + for i in range(len(edge)): + if is_3d: + a.append(ax.plot(np.zeros(3), np.zeros(3), p_type[m])[0]) + else: + a.append(ax.plot(np.zeros(2), np.zeros(2), p_type[m])[0]) + pose.append(a) + ax.axis([-1, 1, -1, 1]) + if is_3d: + ax.set_zlim3d(-1, 1) + for t in range(T): + for m in range(M): + for i, (v1, v2) in enumerate(edge): + x1 = data[0, :2, t, v1, m] + x2 = data[0, :2, t, v2, m] + if (x1.sum() != 0 and x2.sum() != 0) or v1 == 1 or v2 == 1: + pose[m][i].set_xdata(data[0, 0, t, [v1, v2], m]) + pose[m][i].set_ydata(data[0, 1, t, [v1, v2], m]) + if is_3d: + pose[m][i].set_3d_properties(data[0, 2, t, [v1, v2], m]) + fig.canvas.draw() + # plt.savefig('/home/lshi/Desktop/skeleton_sequence/' + str(t) + '.jpg') + plt.pause(0.01) + + +if __name__ == '__main__': + import os + + os.environ['DISPLAY'] = 'localhost:10.0' + data_path = "../data/ntu/xview/val_data_joint.npy" + label_path = "../data/ntu/xview/val_label.pkl" + graph = 'graph.ntu_rgb_d.Graph' + test(data_path, label_path, vid='S004C001P003R001A032', graph=graph, is_3d=True) + # data_path = "../data/kinetics/val_data.npy" + # label_path = "../data/kinetics/val_label.pkl" + # graph = 'graph.Kinetics' + # test(data_path, label_path, vid='UOD7oll3Kqo', graph=graph) diff --git a/feeders/tools.py b/feeders/tools.py new file mode 100644 index 0000000..f14e9b9 --- /dev/null +++ b/feeders/tools.py @@ -0,0 +1,161 @@ +import random + +import numpy as np + + +def downsample(data_numpy, step, random_sample=True): + # input: C,T,V,M + begin = np.random.randint(step) if random_sample else 0 + return data_numpy[:, begin::step, :, :] + + +def temporal_slice(data_numpy, step): + # input: C,T,V,M + C, T, V, M = data_numpy.shape + return data_numpy.reshape(C, T / step, step, V, M).transpose( + (0, 1, 3, 2, 4)).reshape(C, T / step, V, step * M) + + +def mean_subtractor(data_numpy, mean): + # input: C,T,V,M + # naive version + if mean == 0: + return + C, T, V, M = data_numpy.shape + valid_frame = (data_numpy != 0).sum(axis=3).sum(axis=2).sum(axis=0) > 0 + begin = valid_frame.argmax() + end = len(valid_frame) - valid_frame[::-1].argmax() + data_numpy[:, :end, :, :] = data_numpy[:, :end, :, :] - mean + return data_numpy + + +def auto_pading(data_numpy, size, random_pad=False): + C, T, V, M = data_numpy.shape + if T < size: + begin = random.randint(0, size - T) if random_pad else 0 + data_numpy_paded = np.zeros((C, size, V, M)) + data_numpy_paded[:, begin:begin + T, :, :] = data_numpy + return data_numpy_paded + else: + return data_numpy + + +def random_choose(data_numpy, size, auto_pad=True): + # input: C,T,V,M 随机选择其中一段,不是很合理。因为有0 + C, T, V, M = data_numpy.shape + if T == size: + return data_numpy + elif T < size: + if auto_pad: + return auto_pading(data_numpy, size, random_pad=True) + else: + return data_numpy + else: + begin = random.randint(0, T - size) + return data_numpy[:, begin:begin + size, :, :] + + +def random_move(data_numpy, + angle_candidate=[-10., -5., 0., 5., 10.], + scale_candidate=[0.9, 1.0, 1.1], + transform_candidate=[-0.2, -0.1, 0.0, 0.1, 0.2], + move_time_candidate=[1]): + # input: C,T,V,M + C, T, V, M = data_numpy.shape + move_time = random.choice(move_time_candidate) + node = np.arange(0, T, T * 1.0 / move_time).round().astype(int) + node = np.append(node, T) + num_node = len(node) + + A = np.random.choice(angle_candidate, num_node) + S = np.random.choice(scale_candidate, num_node) + T_x 
= np.random.choice(transform_candidate, num_node) + T_y = np.random.choice(transform_candidate, num_node) + + a = np.zeros(T) + s = np.zeros(T) + t_x = np.zeros(T) + t_y = np.zeros(T) + + # linspace + for i in range(num_node - 1): + a[node[i]:node[i + 1]] = np.linspace( + A[i], A[i + 1], node[i + 1] - node[i]) * np.pi / 180 + s[node[i]:node[i + 1]] = np.linspace(S[i], S[i + 1], + node[i + 1] - node[i]) + t_x[node[i]:node[i + 1]] = np.linspace(T_x[i], T_x[i + 1], + node[i + 1] - node[i]) + t_y[node[i]:node[i + 1]] = np.linspace(T_y[i], T_y[i + 1], + node[i + 1] - node[i]) + + theta = np.array([[np.cos(a) * s, -np.sin(a) * s], + [np.sin(a) * s, np.cos(a) * s]]) # xuanzhuan juzhen + + # perform transformation + for i_frame in range(T): + xy = data_numpy[0:2, i_frame, :, :] + new_xy = np.dot(theta[:, :, i_frame], xy.reshape(2, -1)) + new_xy[0] += t_x[i_frame] + new_xy[1] += t_y[i_frame] # pingyi bianhuan + data_numpy[0:2, i_frame, :, :] = new_xy.reshape(2, V, M) + + return data_numpy + + +def random_shift(data_numpy): + # input: C,T,V,M 偏移其中一段 + C, T, V, M = data_numpy.shape + data_shift = np.zeros(data_numpy.shape) + valid_frame = (data_numpy != 0).sum(axis=3).sum(axis=2).sum(axis=0) > 0 + begin = valid_frame.argmax() + end = len(valid_frame) - valid_frame[::-1].argmax() + + size = end - begin + bias = random.randint(0, T - size) + data_shift[:, bias:bias + size, :, :] = data_numpy[:, begin:end, :, :] + + return data_shift + + +def openpose_match(data_numpy): + C, T, V, M = data_numpy.shape + assert (C == 3) + score = data_numpy[2, :, :, :].sum(axis=1) + # the rank of body confidence in each frame (shape: T-1, M) + rank = (-score[0:T - 1]).argsort(axis=1).reshape(T - 1, M) + + # data of frame 1 + xy1 = data_numpy[0:2, 0:T - 1, :, :].reshape(2, T - 1, V, M, 1) + # data of frame 2 + xy2 = data_numpy[0:2, 1:T, :, :].reshape(2, T - 1, V, 1, M) + # square of distance between frame 1&2 (shape: T-1, M, M) + distance = ((xy2 - xy1) ** 2).sum(axis=2).sum(axis=0) + + # match pose + forward_map = np.zeros((T, M), dtype=int) - 1 + forward_map[0] = range(M) + for m in range(M): + choose = (rank == m) + forward = distance[choose].argmin(axis=1) + for t in range(T - 1): + distance[t, :, forward[t]] = np.inf + forward_map[1:][choose] = forward + assert (np.all(forward_map >= 0)) + + # string data + for t in range(T - 1): + forward_map[t + 1] = forward_map[t + 1][forward_map[t]] + + # generate data + new_data_numpy = np.zeros(data_numpy.shape) + for t in range(T): + new_data_numpy[:, t, :, :] = data_numpy[:, t, :, forward_map[ + t]].transpose(1, 2, 0) + data_numpy = new_data_numpy + + # score sort + trace_score = data_numpy[2, :, :, :].sum(axis=1).sum(axis=0) + rank = (-trace_score).argsort() + data_numpy = data_numpy[:, :, :, rank] + + return data_numpy diff --git a/graph/__init__.py b/graph/__init__.py new file mode 100644 index 0000000..4a1bf91 --- /dev/null +++ b/graph/__init__.py @@ -0,0 +1,2 @@ +from . import tools +from . 
import sign_27 \ No newline at end of file diff --git a/graph/sign_27.py b/graph/sign_27.py new file mode 100644 index 0000000..7259295 --- /dev/null +++ b/graph/sign_27.py @@ -0,0 +1,80 @@ +import sys + +sys.path.extend(['../']) +from graph import tools +import pandas as pd + + + +''' +inward_ori_index = [(5, 6), (5, 7), + (6, 8), (8, 10), (7, 9), (9, 11), + (12,13),(12,14),(12,16),(12,18),(12,20), + (14,15),(16,17),(18,19),(20,21), + (22,23),(22,24),(22,26),(22,28),(22,30), + (24,25),(26,27),(28,29),(30,31), + (10,12),(11,22)] + + +inward_ori_index = [(1, 2), (1, 3), (2, 4), (4, 6), (3, 5), (5, 7), + + (6, 8), + (8, 9), (9, 10), (10, 11), (11, 12), + (8, 13), (13, 14), (14, 15), (15, 16), + (8, 17), (17, 18), (18, 19), (19, 20), + (8, 21), (21, 22), (22, 23), (23, 24), + (8, 25), (25, 26), (26, 27), (27, 28), + + (7, 29), + (29, 30), (30, 31), (31, 32), (32, 33), + (29, 34), (34, 35), (35, 36), (36, 37), + (29, 38), (38, 39), (39, 40), (40, 41), + (29, 42), (42, 43), (43, 44), (44, 45), + (29, 46), (46, 47), (47, 48), (48, 49) + ] +''' + + + +class Graph: + def __init__(self, labeling_mode='spatial',num_node=29): + self.num_node = num_node + #num_node = 29 # 29 or 71 + points = pd.read_csv(f"points_{self.num_node}.csv") + ori = points.origin + tar = points.tarjet + + self.inward_ori_index = [(o,t) for o, t in zip(ori, tar)] + + + self.self_link = [(i, i) for i in range(self.num_node)] + + self.inward = [(i - 1, j - 1) for (i, j) in self.inward_ori_index] + self.outward = [(j, i) for (i, j) in self.inward] + self.neighbor = self.inward + self.outward + + print("NUM OF NODES:", self.num_node) + + + self.A = self.get_adjacency_matrix(labeling_mode) + + def get_adjacency_matrix(self, labeling_mode=None): + if labeling_mode is None: + return self.A + if labeling_mode == 'spatial': + A = tools.get_spatial_graph(self.num_node, self.self_link, self.inward, self.outward) + else: + raise ValueError() + return A + + +if __name__ == '__main__': + import matplotlib.pyplot as plt + import os + + # os.environ['DISPLAY'] = 'localhost:11.0' + A = Graph('spatial').get_adjacency_matrix() + for i in A: + plt.imshow(i, cmap='gray') + plt.show() + print(A) diff --git a/graph/tools.py b/graph/tools.py new file mode 100644 index 0000000..854e30a --- /dev/null +++ b/graph/tools.py @@ -0,0 +1,27 @@ +import numpy as np + + +def edge2mat(link, num_node): + A = np.zeros((num_node, num_node)) + for i, j in link: + A[j, i] = 1 + return A + + +def normalize_digraph(A): # 除以每列的和 + Dl = np.sum(A, 0) + h, w = A.shape + Dn = np.zeros((w, w)) + for i in range(w): + if Dl[i] > 0: + Dn[i, i] = Dl[i] ** (-1) + AD = np.dot(A, Dn) + return AD + + +def get_spatial_graph(num_node, self_link, inward, outward): + I = edge2mat(self_link, num_node) + In = normalize_digraph(edge2mat(inward, num_node)) + Out = normalize_digraph(edge2mat(outward, num_node)) + A = np.stack((I, In, Out)) + return A diff --git a/main.py b/main.py new file mode 100644 index 0000000..7c97b4c --- /dev/null +++ b/main.py @@ -0,0 +1,1053 @@ +#!/usr/bin/env python +from __future__ import print_function +import argparse +import os +import time +import numpy as np +import yaml +import pickle +from collections import OrderedDict +import csv +# torch +import torch +import torch.nn as nn +import torch.optim as optim +from torch.autograd import Variable +from tqdm import tqdm +import shutil +from torch.optim.lr_scheduler import ReduceLROnPlateau +import random +import inspect +import torchmetrics +import matplotlib.pyplot as plt +import seaborn as sns +import pandas as 
pd +import wandbFunctions as wandbF +import wandb +import time +from data_gen.getConnectingPoint import * + +wandbFlag = True + +# class LabelSmoothingCrossEntropy(nn.Module): +# def __init__(self): +# super(LabelSmoothingCrossEntropy, self).__init__() +# def forward(self, x, target, smoothing=0.1): +# confidence = 1. - smoothing +# logprobs = F.log_softmax(x, dim=-1) +# nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1)) +# nll_loss = nll_loss.squeeze(1) +# smooth_loss = -logprobs.mean(dim=-1) +# loss = confidence * nll_loss + smoothing * smooth_loss +# return loss.mean() + + +model_name = '' +def create_one_folder(directory): + if not os.path.exists(directory): + os.makedirs(directory) + +def create_folder(directory): + path = directory.split('/') + total_path ='' + for i in path: + total_path = os.path.join(total_path,i) + #print(i, ' create : ',total_path) + create_one_folder(total_path) + + #print('directory : ',directory) + create_one_folder(directory) + create_one_folder(directory+'/') + #print('created paths') + +def init_seed(value_seed): + torch.cuda.manual_seed_all(value_seed) + torch.manual_seed(value_seed) + np.random.seed(value_seed) + random.seed(value_seed) + #torch.backends.cudnn.enabled = False + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + +def get_parser(): + # parameter priority: command line > config > default + parser = argparse.ArgumentParser(description='Decoupling Graph Convolution Network with DropGraph Module') + parser.add_argument('--work-dir',default='./work_dir/temp',help='the work folder for storing results') + + parser.add_argument('-model_saved_directory', default='') + parser.add_argument('-experiment_name', default='') + parser.add_argument('--config',default='config/sign/train/train_joint.yaml',help='path to the configuration file') + + # processor + parser.add_argument('--phase', default='train', help='must be train or test') + parser.add_argument('--save-score',type=str2bool,default=False,help='if ture, the classification score will be stored') + + # visulize and debug + parser.add_argument('--seed', type=int, default=1, help='random seed for pytorch') + parser.add_argument('--log-interval',type=int,default=100,help='the interval for printing messages (#iteration)') + parser.add_argument('--save-interval',type=int,default=2,help='the interval for storing models (#iteration)') + parser.add_argument('--eval-interval',type=int,default=5,help='the interval for evaluating models (#iteration)') + parser.add_argument('--print-log',type=str2bool,default=True,help='print logging or not') + parser.add_argument('--show-topk',type=int,default=[1, 5],nargs='+',help='which Top K accuracy will be shown') + + # feeder + parser.add_argument('--feeder', default='feeder.feeder', help='data loader will be used') + parser.add_argument('--num-worker',type=int,default=32,help='the number of worker for data loader') + parser.add_argument('--train-feeder-args',default=dict(),help='the arguments of data loader for training') + parser.add_argument('--test-feeder-args',default=dict(),help='the arguments of data loader for test') + + # model + parser.add_argument('--model', default=None, help='the model will be used') + parser.add_argument('--model-args',type=dict,default=dict(),help='the arguments of model') + parser.add_argument('--weights',default=None,help='the weights for network initialization') + parser.add_argument('--ignore-weights',type=str,default=[],nargs='+',help='the name of weights which will be ignored in the 
initialization') + + # optim + parser.add_argument('--base_lr', type=float, default=0.05, help='initial learning rate') + parser.add_argument('--num_epoch',type=int,default=500,help='stop training in which epoch') + + parser.add_argument('--step',type=int,default=[20, 40, 60],nargs='+',help='the epoch where optimizer reduce the learning rate') + parser.add_argument('--device',type=int,default=0,nargs='+',help='the indexes of GPUs for training or testing') + parser.add_argument('--optimizer', default='SGD', help='type of optimizer') + parser.add_argument('--nesterov', type=str2bool, default=False, help='use nesterov or not') + parser.add_argument('--batch-size', type=int, default=32, help='training batch size') + parser.add_argument('--test-batch-size', type=int, default=256, help='test batch size') + parser.add_argument('--start-epoch',type=int,default=0,help='start training from which epoch') + parser.add_argument('--weight-decay',type=float,default=0.0001,help='weight decay for optimizer') + parser.add_argument('--keep_rate',type=float,default=0.9,help='keep probability for drop') + parser.add_argument('--groups',type=int,default=8,help='decouple groups') + parser.add_argument('--only_train_part', default=True) + parser.add_argument('--only_train_epoch', default=0) + parser.add_argument('--warm_up_epoch', default=0) + + # Data + + parser.add_argument("--experiment_name", type=str, default="", help="Path to the training dataset CSV file") + parser.add_argument("--training_set_path", type=str, default="", help="Path to the training dataset CSV file") + parser.add_argument("--keypoints_model", type=str, default="openpose", help="Path to the training dataset CSV file") + parser.add_argument("--keypoints_number", type=int, default=29, help="Path to the training dataset CSV file") + parser.add_argument("--testing_set_path", type=str, default="", help="Path to the testing dataset CSV file") + parser.add_argument("--num_class", type=int, default=0, help="Path to the testing dataset CSV file") + parser.add_argument("--database", type=str, default="", help="Path to the testing dataset CSV file") + parser.add_argument("--mode_train", type=str, default="train", help="Path to the testing dataset CSV file") + + return parser + + +class Processor(): + """ + Processor for Skeleton-based Action Recgnition + """ + + def __init__(self, arg): + + + self.arg = arg + self.save_arg() + self.connectingPoints(arg) + + + if arg.phase == 'train': + pass + ''' + if not arg.train_feeder_args['debug']: + if os.path.isdir(arg.model_saved_directory): + print('log_dir: ', arg.model_saved_directory, 'already exist') + answer = 'y'#input('delete it? y/n:') + if answer == 'y': + shutil.rmtree(arg.model_saved_directory) + print('Dir removed: ', arg.model_saved_directory) + #input('Refresh the website of tensorboard by pressing any keys') + else: + print('Dir not removed: ', arg.model_saved_directory) + ''' + + self.global_step = 0 + self.load_model() + self.load_optimizer() + self.load_data() + self.lr = self.arg.base_lr + self.best_acc = 0 + self.best_tmp_acc = 0 + + self.maxTestAcc = 0 + self.relative_maxtop5 = 0 + + + def connectingPoints(self,arg): + print('Creating points .. 
') + + folderName= '1' # just used to create folder "1" in data/sign/1/ + out_folder='data/sign/' + out_path = os.path.join(out_folder, folderName) + + kp_model = arg.kp_model# 'wholepose' # openpose wholepose mediapipe + dataset = arg.experiment_name# "PUCP" # WLASL PUCP_PSL_DGI156 AEC + numPoints = arg.keypoints_number # number of points used, need to be: 29 or 71 + data_path_train = arg.training_set_path #f'../../../../joe/ConnectingPoints/split/WLASL--wholepose-Train.hdf5' + data_path_test = arg.testing_set_path#f'../../../../joe/ConnectingPoints/split/WLASL--wholepose-Val.hdf5' + + + model_key_getter = {'mediapipe': get_mp_keys, + 'openpose': get_op_keys, + 'wholepose': get_wp_keys} + + if not os.path.exists(out_path): + os.makedirs(out_path) + + print('kp_model',kp_model) + print('\n',kp_model, dataset,'\n') + print(out_path,'->', 'train') + gendata(data_path_train, out_path, model_key_getter[kp_model], part='train', config=numPoints) + print(out_path,'->', 'val') + gendata(data_path_test, out_path, model_key_getter[kp_model], part='val', config=numPoints) + print('Creating points completed!!! ') + + + def load_data(self): + Feeder = import_class(self.arg.feeder) + ln = Feeder(**self.arg.test_feeder_args) + self.meaning = ln.meaning + #print(ln.meaning) + self.data_loader = dict() + if self.arg.phase == 'train': + self.data_loader['train'] = torch.utils.data.DataLoader( + dataset=Feeder(**self.arg.train_feeder_args), + batch_size=self.arg.batch_size, + shuffle=True, + num_workers=self.arg.num_worker, + drop_last=True, + worker_init_fn=init_seed) + self.data_loader['test'] = torch.utils.data.DataLoader( + dataset=Feeder(**self.arg.test_feeder_args), + batch_size=self.arg.test_batch_size, + shuffle=False, + num_workers=self.arg.num_worker, + drop_last=False, + worker_init_fn=init_seed) + + def load_model(self): + output_device = self.arg.device[0] if type( + self.arg.device) is list else self.arg.device + self.output_device = output_device + + print('^'*20) + print('self.arg.model',self.arg.model) + print('model_args',self.arg.model_args) + + Model = import_class(self.arg.model) + + + shutil.copy2(inspect.getfile(Model), self.arg.work_dir) + self.model = Model(**self.arg.model_args).cuda(output_device) + # print(self.model) + if wandbFlag: + wandbF.watch(self.model) + self.loss = nn.CrossEntropyLoss().cuda(output_device) + + path_model_init = os.path.join(arg.model_saved_directory,arg.kp_model + '-' + arg.database +'-'+str(arg.keypoints_number)+ "-"+str(arg.seed)+"-init.pt") + + self.print_log('%'*20) + self.print_log('path_model_init :') + self.print_log(path_model_init) + torch.save(self.model.state_dict(), path_model_init) + self.print_log('%'*20) + + self.m_params = sum(p.numel() for p in self.model.parameters()) + self.trainable_m_params= sum(p.numel() for p in self.model.parameters() if p.requires_grad) + + + + # self.loss = LabelSmoothingCrossEntropy().cuda(output_device) + + + #self.slrt_model_wp.load_state_dict(self.slrt_model_op.state_dict()) + + if self.arg.weights: + self.print_log('Load weights from {}.'.format(self.arg.weights)) + if '.pkl' in self.arg.weights: + with open(self.arg.weights, 'r') as f: + weights = pickle.load(f) + else: + weights = torch.load(self.arg.weights) + self.print_log("weights readed!") + + weights = OrderedDict( + [[k.split('module.')[-1], + v.cuda(output_device)] for k, v in weights.items()]) + + for w in self.arg.ignore_weights: + if weights.pop(w, None) is not None: + self.print_log('Sucessfully Remove Weights: {}.'.format(w)) + else: + 
self.print_log('Can Not Remove Weights: {}.'.format(w)) + + try: + self.print_log("load state dict weights") + self.model.load_state_dict(weights) + self.print_log("load state dict weights completed!") + + except: + state = self.model.state_dict() + diff = list(set(state.keys()).difference(set(weights.keys()))) + print('Can not find these weights:') + for d in diff: + print(' ' + d) + state.update(weights) + self.model.load_state_dict(state) + + if type(self.arg.device) is list: + if len(self.arg.device) > 1: + self.model = nn.DataParallel( + self.model, + device_ids=self.arg.device, + output_device=output_device) + + def load_optimizer(self): + + if self.arg.optimizer == 'SGD': + + params_dict = dict(self.model.named_parameters()) + params = [] + + for key, value in params_dict.items(): + decay_mult = 0.0 if 'bias' in key else 1.0 + + lr_mult = 1.0 + weight_decay = 1e-4 + + params += [{'params': value, 'lr': self.arg.base_lr, 'lr_mult': lr_mult, + 'decay_mult': decay_mult, 'weight_decay': weight_decay}] + if wandbFlag: + wandb.config = { + "learning_rate": self.arg.base_lr, + "epochs": self.arg.num_epoch, + "batch_size": self.arg.batch_size, + "weight_decay":self.arg.weight_decay, + "num_class":self.arg.model_args["num_class"], + "momentum":0.9 + } + self.optimizer = optim.SGD( + params, + momentum=0.9, + nesterov=self.arg.nesterov) + elif self.arg.optimizer == 'Adam': + self.optimizer = optim.Adam( + self.model.parameters(), + lr=self.arg.base_lr, + weight_decay=self.arg.weight_decay) + + if wandbFlag: + wandb.config = { + "learning_rate": self.arg.base_lr, + "epochs": self.arg.num_epoch, + "batch_size": self.arg.batch_size, + "weight_decay":self.arg.weight_decay, + "num_class":self.arg.model_args["num_class"] + } + else: + raise ValueError() + + self.lr_scheduler = ReduceLROnPlateau(self.optimizer, mode='min', factor=0.1, + patience=10, verbose=True, + threshold=1e-4, threshold_mode='rel', + cooldown=0) + + + def save_arg(self): + # save arg + arg_dict = vars(self.arg) + + if not os.path.exists(self.arg.work_dir): + os.makedirs(self.arg.work_dir) + os.makedirs(self.arg.work_dir + '/eval_results') + os.makedirs(self.arg.work_dir + '/eval_results/'+ model_name, exist_ok = True) + + with open('{}/config.yaml'.format(self.arg.work_dir), 'w') as f: + yaml.dump(arg_dict, f) + + + def adjust_learning_rate(self, epoch): + if self.arg.optimizer == 'SGD' or self.arg.optimizer == 'Adam': + if epoch < self.arg.warm_up_epoch: + lr = self.arg.base_lr * (epoch + 1) / self.arg.warm_up_epoch + else: + lr = self.arg.base_lr * ( + 0.1 ** np.sum(epoch >= np.array(self.arg.step))) + for param_group in self.optimizer.param_groups: + param_group['lr'] = lr + return lr + else: + raise ValueError() + + + def print_time(self): + localtime = time.asctime(time.localtime(time.time())) + self.print_log("Local current time : " + localtime) + + + def print_log(self, str, print_time=True): + if print_time: + localtime = time.asctime(time.localtime(time.time())) + str = "[ " + localtime + ' ] ' + str + print(str) + if self.arg.print_log: + with open('{}/log.txt'.format(self.arg.work_dir), 'a') as f: + print(str, file=f) + + + def record_time(self): + self.cur_time = time.time() + return self.cur_time + + + def split_time(self): + split_time = time.time() - self.cur_time + self.record_time() + return split_time + + def train_zero(self, epoch, save_model=False): + self.model.train(False) + loader = self.data_loader['train'] + loss_value = [] + predict_arr = [] + proba_arr = [] + target_arr = [] + + self.record_time() + + 
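
Editor's note, not part of the patch: adjust_learning_rate above implements a linear warm-up for the first warm_up_epoch epochs followed by a 10x step decay at each milestone in step. A self-contained sketch of that schedule; the default values below are examples only:

import numpy as np

def schedule_lr(epoch, base_lr=0.1, warm_up_epoch=20, step=(150, 200)):
    if epoch < warm_up_epoch:
        return base_lr * (epoch + 1) / warm_up_epoch
    return base_lr * (0.1 ** np.sum(epoch >= np.array(step)))

# schedule_lr(0)   -> 0.005  (start of warm-up)
# schedule_lr(19)  -> 0.1    (warm-up finished)
# schedule_lr(160) -> 0.01   (past the first milestone)
# schedule_lr(210) -> 0.001  (past both milestones)
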
timer = dict(dataloader=0.001, model=0.001, statistics=0.001) + process = tqdm(loader) + meaning = list(self.meaning.values()) + + for batch_idx, (data, label, index, name) in enumerate(process): + + self.global_step += 1 + + label_tmp = label.cpu().numpy() + # get data + data = Variable(data.float().cuda(self.output_device), requires_grad=False) + label = Variable(label.long().cuda(self.output_device), requires_grad=False) + timer['dataloader'] += self.split_time() + + # forward + if epoch < 100: + keep_prob = -(1 - self.arg.keep_rate) / 100 * epoch + 1.0 + else: + keep_prob = self.arg.keep_rate + + output = self.model(data, keep_prob) + + if isinstance(output, tuple): + output, l1 = output + l1 = l1.mean() + else: + l1 = 0 + + #print('output',output) + #print('label',label) + loss = self.loss(output, label) + #print('loss',loss) + #for r,s in zip(name,label_tmp): + # meaning[s]= '_'.join(r.split('_')[:-1]) + + loss_value.append(loss.data.cpu().numpy()) + timer['model'] += self.split_time() + + value, predict_label = torch.max(output.data, 1) + + predict_arr.append(predict_label.cpu().numpy()) + target_arr.append(label.data.cpu().numpy()) + proba_arr.append(output.data.cpu().numpy()) + + acc = torch.mean((predict_label == label.data).float()) + + + if self.global_step % self.arg.log_interval == 0: + self.print_log( + '\tBatch({}/{}) done. Loss: {:.4f} lr:{:.6f}'.format( + batch_idx, len(loader), loss.data, self.lr)) + timer['statistics'] += self.split_time() + + predict_arr = np.concatenate(predict_arr) + target_arr = np.concatenate(target_arr) + proba_arr = np.concatenate(proba_arr) + accuracy = torch.mean((predict_label == label.data).float()) + if accuracy >= self.best_tmp_acc: + self.best_tmp_acc = accuracy + + if epoch+1 == arg.num_epoch: + if wandbFlag: + wandb.log({"TRAIN_conf_mat" : wandb.plot.confusion_matrix( + #probs=score, + #y_true=list(label.values()), + #preds=list(predict_label.values()), + y_true=list(target_arr), + preds=list(predict_arr), + class_names=meaning, + title="TRAIN_conf_mat")}) + + if wandbFlag: + mean_loss = np.mean(loss_value) + if mean_loss>10: + mean_loss = 10 + wandbF.wandbTrainLog(mean_loss, accuracy,self.m_params,self.trainable_m_params) + # statistics of time consumption and loss + proportion = { + k: '{:02d}%'.format(int(round(v * 100 / sum(timer.values())))) + for k, v in timer.items() + } + + + def train(self, epoch, save_model=False): + self.model.train() + self.print_log('Training epoch: {}'.format(epoch + 1)) + loader = self.data_loader['train'] + self.adjust_learning_rate(epoch) + loss_value = [] + predict_arr = [] + proba_arr = [] + target_arr = [] + + self.record_time() + + timer = dict(dataloader=0.001, model=0.001, statistics=0.001) + process = tqdm(loader) + if epoch >= self.arg.only_train_epoch: + print('only train part, require grad') + for key, value in self.model.named_parameters(): + if 'DecoupleA' in key: + value.requires_grad = True + print(key + '-require grad') + else: + print('only train part, do not require grad') + for key, value in self.model.named_parameters(): + if 'DecoupleA' in key: + value.requires_grad = False + print(key + '-not require grad') + + meaning = list(self.meaning.values()) + + for batch_idx, (data, label, index, name) in enumerate(process): + + self.global_step += 1 + + label_tmp = label.cpu().numpy() + # get data + data = Variable(data.float().cuda( + self.output_device), requires_grad=False) + label = Variable(label.long().cuda( + self.output_device), requires_grad=False) + timer['dataloader'] += 
self.split_time() + + # forward + if epoch < 100: + keep_prob = -(1 - self.arg.keep_rate) / 100 * epoch + 1.0 + else: + keep_prob = self.arg.keep_rate + + output = self.model(data, keep_prob) + + if isinstance(output, tuple): + output, l1 = output + l1 = l1.mean() + else: + l1 = 0 + loss = self.loss(output, label) + l1 + + #for r,s in zip(name,label_tmp): + # meaning[s]= '_'.join(r.split('_')[:-1]) + + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + loss_value.append(loss.data.cpu().numpy()) + timer['model'] += self.split_time() + + value, predict_label = torch.max(output.data, 1) + + predict_arr.append(predict_label.cpu().numpy()) + target_arr.append(label.data.cpu().numpy()) + proba_arr.append(output.data.cpu().numpy()) + + acc = torch.mean((predict_label == label.data).float()) + + self.lr = self.optimizer.param_groups[0]['lr'] + + if self.global_step % self.arg.log_interval == 0: + self.print_log( + '\tBatch({}/{}) done. Loss: {:.4f} lr:{:.6f}'.format( + batch_idx, len(loader), loss.data, self.lr)) + timer['statistics'] += self.split_time() + + predict_arr = np.concatenate(predict_arr) + target_arr = np.concatenate(target_arr) + proba_arr = np.concatenate(proba_arr) + accuracy = torch.mean((predict_label == label.data).float()) + if accuracy >= self.best_tmp_acc: + self.best_tmp_acc = accuracy + + if epoch+1 == arg.num_epoch: + if wandbFlag: + wandb.log({"TRAIN_conf_mat" : wandb.plot.confusion_matrix( + #probs=score, + #y_true=list(label.values()), + #preds=list(predict_label.values()), + y_true=list(target_arr), + preds=list(predict_arr), + class_names=meaning, + title="TRAIN_conf_mat")}) + + if wandbFlag: + mean_loss = np.mean(loss_value) + if mean_loss>10: + mean_loss = 10 + + wandbF.wandbTrainLog(mean_loss, accuracy,self.m_params,self.trainable_m_params) + # statistics of time consumption and loss + proportion = { + k: '{:02d}%'.format(int(round(v * 100 / sum(timer.values())))) + for k, v in timer.items() + } + + + def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, result_file=None, isTest=False): + if wrong_file is not None: + f_w = open(wrong_file, 'w') + if result_file is not None: + f_r = open(result_file, 'w') + #if isTest: + submission = dict() + trueLabels = dict() + + meaning = list(self.meaning.values()) + self.model.eval() + with torch.no_grad(): + self.print_log('Eval epoch: {}'.format(epoch + 1)) + for ln in loader_name: + + loss_value = [] + score_frag = [] + right_num_total = 0 + total_num = 0 + loss_total = 0 + step = 0 + process = tqdm(self.data_loader[ln]) + + for batch_idx, (data, label, index, name) in enumerate(process): + label_tmp = label + data = Variable( + data.float().cuda(self.output_device), + requires_grad=False) + label = Variable( + label.long().cuda(self.output_device), + requires_grad=False) + + with torch.no_grad(): + output = self.model(data) + + if isinstance(output, tuple): + output, l1 = output + l1 = l1.mean() + else: + l1 = 0 + + #print('val output',output) + #print('val label',label) + loss = self.loss(output, label) + #print('val loss',loss) + score_frag.append(output.data.cpu().numpy()) + loss_value.append(loss.data.cpu().numpy()) + + _, predict_label = torch.max(output.data, 1) + + #if isTest: + for j in range(output.size(0)): + submission[name[j]] = predict_label[j].item() + trueLabels[name[j]] = label_tmp[j].item() + + step += 1 + + if wrong_file is not None or result_file is not None: + predict = list(predict_label.cpu().numpy()) + true = list(label.data.cpu().numpy()) + for i, x in 
enumerate(predict): + if result_file is not None: + f_r.write(str(x) + ',' + str(true[i]) + '\n') + if x != true[i] and wrong_file is not None: + f_w.write(str(index[i]) + ',' + + str(x) + ',' + str(true[i]) + '\n') + score = np.concatenate(score_frag) + + if 'UCLA' in arg.experiment_name: + self.data_loader[ln].dataset.sample_name = np.arange( + len(score)) + + accuracy = self.data_loader[ln].dataset.top_k(score, 1) + top5 = self.data_loader[ln].dataset.top_k(score, 5) + + if accuracy > self.best_acc: + self.best_acc = accuracy + + score_dict = dict( + zip(self.data_loader[ln].dataset.sample_name, score)) + + conf_mat = torchmetrics.ConfusionMatrix(num_classes=self.arg.model_args["num_class"]) + ''' + print('self.arg.model_args["num_class"]',self.arg.model_args["num_class"]) + + print('list(submission.values())',list(submission.values())) + print('set(list(submission.values()))',set(list(submission.values()))) + print('len(set(list(submission.values())))',len(set(list(submission.values())))) + + print('list(trueLabels.values())',list(trueLabels.values())) + print('set(list(trueLabels.values()))',set(list(trueLabels.values()))) + print('len(set(list(trueLabels.values())))',len(set(list(trueLabels.values())))) + ''' + confusion_matrix = conf_mat(torch.tensor(list(submission.values())).cpu(), torch.tensor(list(trueLabels.values())).cpu()) + confusion_matrix = confusion_matrix.detach().cpu().numpy() + + plt.figure(figsize = (10,7)) + + group_counts = ["{0:0.0f}".format(value) for value in confusion_matrix.flatten()] + ''' + print('confusion_matrix') + print(confusion_matrix) + print('len confusion_matrix') + + print(len(confusion_matrix)) + for line in confusion_matrix: + print('line',line) + print(len(line)) + ''' + confusion_matrix = np.asarray([line/(np.sum(line)+0.0001) for line in confusion_matrix]) + confusion_matrix = np.nan_to_num(confusion_matrix) + + df_cm = pd.DataFrame(confusion_matrix * 100, index = meaning, columns=meaning) + #size_arr = df_cm.sum(axis = 1) + #maxi = max(size_arr) + + group_percentages = ["{0:.1%}".format(value) for value in confusion_matrix.flatten()] + + annot = ["{1}".format(v2, v1) for v1, v2 in zip(group_counts, group_percentages)] + annot = np.asarray(annot).reshape(self.arg.model_args["num_class"], self.arg.model_args["num_class"]) + fig_ = sns.heatmap(df_cm, vmax=100, vmin=0, annot=annot, annot_kws={"size": 5}, cbar_kws={'format': '%.0f%%', 'ticks':[0, 25, 50, 75, 100]},fmt='', cmap='Blues').get_figure() + plt.ylabel('True label') + plt.xlabel('Predicted label' ) + + plt.close(fig_) + + if wandbFlag: + wandb.log({"Confusion matrix": wandb.Image(fig_, caption="VAL_conf_mat")}) + + + print('*'*20) + print('*'*20) + print('*'*20) + + print('./work_dir/' + arg.experiment_name + '/eval_results/'+ model_name+ '/best_acc' + '.pkl') + + + with open('./work_dir/' + arg.experiment_name + '/eval_results/'+ model_name+ '/best_acc' + '.pkl'.format( + epoch, accuracy), 'wb') as f: + pickle.dump(score_dict, f) + + # Save the model + state_dict = self.model.state_dict() + weights = OrderedDict([[k.split('module.')[-1], + v.cpu()] for k, v in state_dict.items()]) + + print('*'*20) + print('*'*20) + print('*'*20) + print(self.arg.model_saved_directory) + print(self.arg.model_saved_directory + '-' + arg.kp_model + '-' + arg.database + "-Lr" + str(arg.base_lr) + "-NClasses" + str(arg.model_args["num_class"]) + '-' + str(accuracy) + '.pt') + torch.save(weights, self.arg.model_saved_directory + '-' + arg.kp_model + '-' + arg.database + "-Lr" + str(arg.base_lr) + "-NClasses" + 
str(arg.model_args["num_class"]) + '-' + str(accuracy) + '.pt') + + + if epoch + 1 == arg.num_epoch: + + if wandbFlag: + try: + wandb.log({"roc" : wandb.plot.roc_curve( list(trueLabels.values()), score, \ + labels=meaning, classes_to_plot=None)}) + + wandb.log({"pr" : wandb.plot.pr_curve(list(trueLabels.values()), score, + labels=meaning, classes_to_plot=None)}) + except: + pass + #wandb.log({"val_sklearn_conf_mat": wandb.sklearn.plot_confusion_matrix(, + # , meaning_3)}) + ''' + wandb.log({"VAL_conf_mat" : wandb.plot.confusion_matrix( + #probs=score, + y_true=list(trueLabels.values()), + preds=list(submission.values()), + class_names=meaning_3, + title="VAL_conf_mat")}) + ''' + + print('Eval Accuracy: ', accuracy, + ' model: ', self.arg.model_saved_directory) + if wandbFlag: + mean_loss = np.mean(loss_value) + if mean_loss>10: + mean_loss = 10 + + self.maxTestAcc = max(accuracy,self.maxTestAcc) + + if self.maxTestAcc == accuracy: + + self.relative_maxtop5 = top5 + + wandbF.wandbValLog(mean_loss, accuracy, top5,self.maxTestAcc,self.relative_maxtop5) + + score_dict = dict( + zip(self.data_loader[ln].dataset.sample_name, score)) + self.print_log('\tMean {} loss of {} batches: {}.'.format( + ln, len(self.data_loader[ln]), np.mean(loss_value))) + for k in self.arg.show_topk: + self.print_log('\tTop{}: {:.2f}%'.format( + k, 100 * self.data_loader[ln].dataset.top_k(score, k))) + ''' + with open('./work_dir/' + arg.experiment_name + '/eval_results/epoch_' + str(epoch) + '_' + str(accuracy) + '.pkl'.format( + epoch, accuracy), 'wb') as f: + pickle.dump(score_dict, f) + ''' + + + predLabels = [] + groundLabels = [] + print("END") + if isTest: + #print(submission) + #print(trueLabels) + totalRows = 0 + with open("submission.csv", 'w') as of: + writer = csv.writer(of) + accum = 0 + for trueName, truePred in trueLabels.items(): + + sample = trueName + #print(f'Predicting {sample}', end=' ') + #print(f'as {submission[sample]} - pred {submission[sample]} and real {row[1]}') + match=0 + predLabels.append(submission[sample]) + groundLabels.append(int(truePred)) + if int(truePred) == int(submission[sample]): + match=1 + accum+=1 + totalRows+=1 + + # identifying subject + with open("pucpSubject.csv") as subjectFile: + readerSubject = csv.reader(subjectFile) + idx = int(sample.split('_')[-1]) + subjectName = 'NA' + for name, idxStart, idxEnd in readerSubject: + if (int(idxStart) <= idx) and (idx<= int(idxEnd)): + subjectName = name + break + writer.writerow([sample, submission[sample], str(truePred), str(match), subjectName]) + + return np.mean(loss_value) + + + def start(self): + if self.arg.phase == 'train': + self.print_log('Parameters:\n{}\n'.format(str(vars(self.arg)))) + self.global_step = self.arg.start_epoch * \ + len(self.data_loader['train']) / self.arg.batch_size + + self.model.train(False) + self.train_zero(0, save_model=False) + val_loss = self.eval(0,save_score=self.arg.save_score,loader_name=['test']) + self.model.train(True) + + for epoch in range(self.arg.start_epoch, self.arg.num_epoch): + + save_model = ((epoch + 1) % self.arg.save_interval == 0) or ( + epoch + 1 == self.arg.num_epoch) + + self.train(epoch, save_model=save_model) + + val_loss = self.eval( + epoch, + save_score=self.arg.save_score, + loader_name=['test']) + + # self.lr_scheduler.step(val_loss) + + print('best accuracy: ', self.best_acc, + ' model_name: ', self.arg.model_saved_directory) + + elif self.arg.phase == 'test': + if not self.arg.test_feeder_args['debug']: + wf = self.arg.model_saved_directory + '_wrong.txt' + rf = 
self.arg.model_saved_directory + '_right.txt' + else: + wf = rf = None + if self.arg.weights is None: + raise ValueError('Please appoint --weights.') + self.arg.print_log = False + self.print_log('Model: {}.'.format(self.arg.model)) + self.print_log('Weights: {}.'.format(self.arg.weights)) + self.eval(epoch=self.arg.start_epoch, save_score=self.arg.save_score, + loader_name=['test'], wrong_file=wf, result_file=rf, isTest=True) + self.print_log('Done.\n') + + +def str2bool(v): + if v.lower() in ('yes', 'true', 't', 'y', '1'): + return True + elif v.lower() in ('no', 'false', 'f', 'n', '0'): + return False + else: + raise argparse.ArgumentTypeError('Boolean value expected.') + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) # import return model + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + + +if __name__ == '__main__': + + + parser = get_parser() + arg = parser.parse_args() + print('seed :',arg.seed) + init_seed(arg.seed) + + for id_iteration in range(1): + + # load arg form config file + + + + print('arg.config',arg.config) + if arg.config is not None: + with open(arg.config, 'r') as f: + #default_arg = yaml.load(f) + default_arg = yaml.safe_load(f) + print('default_arg',default_arg) + key = vars(arg).keys() + for k in default_arg.keys(): + if k not in key: + print('WRONG ARG: {}'.format(k)) + assert (k in key) + parser.set_defaults(**default_arg) + + # load arg form config file + arg = parser.parse_args() + + arg.training_set_path = '../../DATASETS/'+arg.database+'--'+arg.keypoints_model+'-Train.hdf5' + arg.testing_set_path = '../../DATASETS/'+arg.database+'--'+arg.keypoints_model+'-Val.hdf5' + + if arg.database == 'AEC': + arg.num_class = 28 + + if arg.database == 'WLASL': + + arg.num_class = 86 + + if arg.database == 'PUCP': + arg.num_class = 29 + arg.training_set_path = '../../DATASETS/PUCP_PSL_DGI156--'+arg.keypoints_model+'-Train.hdf5' + arg.testing_set_path = '../../DATASETS/PUCP_PSL_DGI156--'+arg.keypoints_model+'-Val.hdf5' + + arg.model_args['num_class'] =arg.num_class + arg.model_args['num_point'] =arg.keypoints_number + + arg.model_args['graph_args']['num_node'] =arg.keypoints_number + + #num_class: 28 # AEC=28, PUCP=36 , WLASL=101 + #num_point: 29 # 29 or 71 + + # arg.training_set_path + # arg.keypoints_model + # arg.keypoints_number + # arg.testing_set_path + # arg.experiment_name + # arg.base_lr + # arg.num_epoch + + + config = { + # + "num-epoch": arg.num_epoch, + "weight-decay": arg.weight_decay, + "batch-size":arg.batch_size, + "base-lr": arg.base_lr, + "kp-model": arg.keypoints_model, + "num_points": arg.keypoints_number, + "database": arg.database, + "mode_train":arg.mode_train, + "seed":arg.seed, + "id_iteration":id_iteration, + } + import wandb + import os + + os.environ["WANDB_API_KEY"] = "15f7c99e787e3f99da09963b0cfb45b73656845f" + + if wandbFlag: + wandb.init(project="sign_language_project", + entity="ml_projects", + reinit=True, + config=config) + + config = wandb.config + print('+'*10) + print('config :',config) + print('+'*10) + arg.base_lr = config["base-lr"] + arg.batch_size = config["batch-size"] + arg.weight_decay = config["weight-decay"] + arg.num_epoch = config["num-epoch"] + arg.kp_model = config["kp-model"] + arg.database = config["database"] + + arg.model_saved_directory = "save_models/"+arg.experiment_name+"/" + arg.work_dir = "work_dir/"+arg.experiment_name+"/" + + print('*'*20) + print('*'*20) + + print('model_saved_directory',arg.model_saved_directory) + 
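
Editor's note, not part of the patch: the __main__ block above merges the YAML config into the argparse defaults with parser.set_defaults and then re-parses, so values given explicitly on the command line still override the file. A condensed sketch of that pattern; the two option names shown are placeholders for the script's full argument set:

import argparse
import yaml

parser = argparse.ArgumentParser()
parser.add_argument('--config', default=None)
parser.add_argument('--base_lr', type=float, default=0.05)
parser.add_argument('--num_epoch', type=int, default=500)

args = parser.parse_args()
if args.config is not None:
    with open(args.config, 'r') as f:
        defaults = yaml.safe_load(f)
    unknown = set(defaults) - set(vars(args))
    if unknown:
        raise ValueError('Unknown keys in config: {}'.format(sorted(unknown)))
    parser.set_defaults(**defaults)
    args = parser.parse_args()  # re-parse so command-line flags win over YAML defaults
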
print('work_dir',arg.work_dir) + + + create_folder(arg.model_saved_directory) + create_folder(arg.work_dir) + create_folder('./work_dir/' + arg.experiment_name + '/eval_results/'+ model_name+ '/') + + # {arg.model_saved_directory}-{arg.kp_model}-{arg.database}-Lr{str(arg.base_lr)}-NClasses{str(arg.num_class)}-{str(config['num_points'])} + #os.makedirs(arg.file_name,exist_ok=True) + + runAndModelName = arg.kp_model + '-' + arg.database +'-'+str(arg.keypoints_number)+ "-Lr" + str(arg.base_lr)+ "-NClas" + str(arg.num_class) + "-Batch" + str(arg.batch_size)+"-Seed"+str(arg.seed)+"-id"+str(id_iteration) + + model_name = runAndModelName + print('model_name : ',model_name) + if wandbFlag: + wandb.run.name = runAndModelName + wandb.run.save() + + + + print("*"*30) + print("*"*30) + print(arg) + print("*"*30) + print("*"*30) + print(arg.train_feeder_args) + print('train_feeder_args',arg.train_feeder_args) + processor = Processor(arg) + processor.start() + if wandbFlag: + wandb.finish() + print("wandb finish") diff --git a/model/__init__.py b/model/__init__.py new file mode 100644 index 0000000..a8a7cbd --- /dev/null +++ b/model/__init__.py @@ -0,0 +1,3 @@ +from . import decouple_gcn_attn +from . import dropSke +from . import dropT \ No newline at end of file diff --git a/model/decouple_gcn_attn.py b/model/decouple_gcn_attn.py new file mode 100644 index 0000000..9f8bbae --- /dev/null +++ b/model/decouple_gcn_attn.py @@ -0,0 +1,284 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +import numpy as np +import math +from model.dropSke import DropBlock_Ske +from model.dropT import DropBlockT_1d + + +def import_class(name): + components = name.split('.') + mod = __import__(components[0]) + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + + +def conv_branch_init(conv): + weight = conv.weight + n = weight.size(0) + k1 = weight.size(1) + k2 = weight.size(2) + nn.init.normal(weight, 0, math.sqrt(2. 
/ (n * k1 * k2))) + nn.init.constant(conv.bias, 0) + + +def conv_init(conv): + nn.init.kaiming_normal(conv.weight, mode='fan_out') + nn.init.constant(conv.bias, 0) + + +def bn_init(bn, scale): + nn.init.constant(bn.weight, scale) + nn.init.constant(bn.bias, 0) + + +class unit_tcn(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1, num_point=25, block_size=41): + super(unit_tcn, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + self.dropS = DropBlock_Ske(num_point=num_point) + self.dropT = DropBlockT_1d(block_size=block_size) + + def forward(self, x, keep_prob, A): + x = self.bn(self.conv(x)) + x = self.dropT(self.dropS(x, keep_prob, A), keep_prob) + return x + + +class unit_tcn_skip(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): + super(unit_tcn_skip, self).__init__() + pad = int((kernel_size - 1) / 2) + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), + stride=(stride, 1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + conv_init(self.conv) + bn_init(self.bn, 1) + + def forward(self, x): + x = self.bn(self.conv(x)) + return x + + +class unit_gcn(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, coff_embedding=4, num_subset=3): + super(unit_gcn, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.num_point = num_point + self.groups = groups + self.num_subset = num_subset + self.DecoupleA = nn.Parameter(torch.tensor(np.reshape(A.astype(np.float32), [ + 3, 1, num_point, num_point]), dtype=torch.float32, requires_grad=True).repeat(1, groups, 1, 1), requires_grad=True) + + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels) + ) + else: + self.down = lambda x: x + + self.bn0 = nn.BatchNorm2d(out_channels * num_subset) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU() + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + conv_init(m) + elif isinstance(m, nn.BatchNorm2d): + bn_init(m, 1) + bn_init(self.bn, 1e-6) + + self.Linear_weight = nn.Parameter(torch.zeros( + in_channels, out_channels * num_subset, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.normal_(self.Linear_weight, 0, math.sqrt( + 0.5 / (out_channels * num_subset))) + + self.Linear_bias = nn.Parameter(torch.zeros( + 1, out_channels * num_subset, 1, 1, requires_grad=True, device='cuda'), requires_grad=True) + nn.init.constant(self.Linear_bias, 1e-6) + + eye_array = [] + for i in range(out_channels): + eye_array.append(torch.eye(num_point)) + self.eyes = nn.Parameter(torch.tensor(torch.stack( + eye_array), requires_grad=False, device='cuda'), requires_grad=False) # [c,25,25] + + def norm(self, A): + b, c, h, w = A.size() + A = A.view(c, self.num_point, self.num_point) + D_list = torch.sum(A, 1).view(c, 1, self.num_point) + D_list_12 = (D_list + 0.001)**(-1) + D_12 = self.eyes * D_list_12 + A = torch.bmm(A, D_12).view(b, c, h, w) + return A + + def forward(self, x0): + learn_A = self.DecoupleA.repeat( + 1, self.out_channels // self.groups, 1, 1) + norm_learn_A = torch.cat([self.norm(learn_A[0:1, ...]), self.norm( + learn_A[1:2, ...]), self.norm(learn_A[2:3, ...])], 0) + + 
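
Editor's note, not part of the patch: unit_gcn.norm() above right-multiplies each channel of the learned adjacency by an inverse degree matrix built from column sums plus a small constant, so every column of the normalized adjacency sums to roughly one. A standalone numerical sketch of that computation; shapes and variable names are illustrative:

import torch

def normalize_adjacency(A, eps=1e-3):
    # A: (channels, V, V) stack of adjacency matrices
    deg = A.sum(dim=1)                                   # (channels, V) column sums
    D_inv = torch.diag_embed((deg + eps).reciprocal())   # (channels, V, V) diagonal
    return torch.bmm(A, D_inv)                           # scale each column by 1 / (its sum + eps)

A = torch.rand(3, 5, 5)
print(normalize_adjacency(A).sum(dim=1))  # every entry is close to 1
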
x = torch.einsum( + 'nctw,cd->ndtw', (x0, self.Linear_weight)).contiguous() + x = x + self.Linear_bias + x = self.bn0(x) + + n, kc, t, v = x.size() + x = x.view(n, self.num_subset, kc // self.num_subset, t, v) + x = torch.einsum('nkctv,kcvw->nctw', (x, norm_learn_A)) + + x = self.bn(x) + x += self.down(x0) + x = self.relu(x) + return x + + +class TCN_GCN_unit(nn.Module): + def __init__(self, in_channels, out_channels, A, groups, num_point, block_size, stride=1, residual=True, attention=True): + super(TCN_GCN_unit, self).__init__() + num_jpts = A.shape[-1] + self.gcn1 = unit_gcn(in_channels, out_channels, A, groups, num_point) + self.tcn1 = unit_tcn(out_channels, out_channels, + stride=stride, num_point=num_point) + self.relu = nn.ReLU() + + self.A = nn.Parameter(torch.tensor(np.sum(np.reshape(A.astype(np.float32), [ + 3, num_point, num_point]), axis=0), dtype=torch.float32, requires_grad=False, device='cuda'), requires_grad=False) + + if not residual: + self.residual = lambda x: 0 + + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + + else: + self.residual = unit_tcn_skip( + in_channels, out_channels, kernel_size=1, stride=stride) + self.dropSke = DropBlock_Ske(num_point=num_point) + self.dropT_skip = DropBlockT_1d(block_size=block_size) + self.attention = attention + if attention: + print('Attention Enabled!') + self.sigmoid = nn.Sigmoid() + # temporal attention + self.conv_ta = nn.Conv1d(out_channels, 1, 9, padding=4) + nn.init.constant_(self.conv_ta.weight, 0) + nn.init.constant_(self.conv_ta.bias, 0) + # s attention + ker_jpt = num_jpts - 1 if not num_jpts % 2 else num_jpts + pad = (ker_jpt - 1) // 2 + self.conv_sa = nn.Conv1d(out_channels, 1, ker_jpt, padding=pad) + nn.init.xavier_normal_(self.conv_sa.weight) + nn.init.constant_(self.conv_sa.bias, 0) + # channel attention + rr = 2 + self.fc1c = nn.Linear(out_channels, out_channels // rr) + self.fc2c = nn.Linear(out_channels // rr, out_channels) + nn.init.kaiming_normal_(self.fc1c.weight) + nn.init.constant_(self.fc1c.bias, 0) + nn.init.constant_(self.fc2c.weight, 0) + nn.init.constant_(self.fc2c.bias, 0) + + def forward(self, x, keep_prob): + y = self.gcn1(x) + if self.attention: + # spatial attention + se = y.mean(-2) # N C V + se1 = self.sigmoid(self.conv_sa(se)) + y = y * se1.unsqueeze(-2) + y + # a1 = se1.unsqueeze(-2) + + # temporal attention + se = y.mean(-1) + se1 = self.sigmoid(self.conv_ta(se)) + y = y * se1.unsqueeze(-1) + y + # a2 = se1.unsqueeze(-1) + + # channel attention + se = y.mean(-1).mean(-1) + se1 = self.relu(self.fc1c(se)) + se2 = self.sigmoid(self.fc2c(se1)) + y = y * se2.unsqueeze(-1).unsqueeze(-1) + y + # a3 = se2.unsqueeze(-1).unsqueeze(-1) + + y = self.tcn1(y, keep_prob, self.A) + x_skip = self.dropT_skip(self.dropSke(self.residual(x), keep_prob, self.A), keep_prob) + return self.relu(y + x_skip) + + +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=2): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + self.graph.num_node = num_point + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(in_channels, 64, A, groups, num_point, + block_size, residual=False) + self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l4 = 
TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l5 = TCN_GCN_unit( + 64, 128, A, groups, num_point, block_size, stride=2) + self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l8 = TCN_GCN_unit(128, 256, A, groups, + num_point, block_size, stride=2) + self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + + self.fc = nn.Linear(256, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x, keep_prob=0.9): + N, C, T, V, M = x.size() + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute( + 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x, 1.0) + x = self.l2(x, 1.0) + x = self.l3(x, 1.0) + x = self.l4(x, 1.0) + x = self.l5(x, 1.0) + x = self.l6(x, 1.0) + x = self.l7(x, keep_prob) + x = self.l8(x, keep_prob) + x = self.l9(x, keep_prob) + x = self.l10(x, keep_prob) + + # N*M,C,T,V + c_new = x.size(1) + + # print(x.size()) + # print(N, M, c_new) + + # x = x.view(N, M, c_new, -1) + x = x.reshape(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) diff --git a/model/dropSke.py b/model/dropSke.py new file mode 100644 index 0000000..c18456a --- /dev/null +++ b/model/dropSke.py @@ -0,0 +1,36 @@ +import torch +import torch.nn.functional as F +from torch import nn +import warnings + + +class DropBlock_Ske(nn.Module): + def __init__(self, num_point, block_size=7): + super(DropBlock_Ske, self).__init__() + self.keep_prob = 0.0 + self.block_size = block_size + self.num_point = num_point + + def forward(self, input, keep_prob, A): # n,c,t,v + self.keep_prob = keep_prob + if not self.training or self.keep_prob == 1: + return input + n, c, t, v = input.size() + + input_abs = torch.mean(torch.mean( + torch.abs(input), dim=2), dim=1).detach() + input_abs = input_abs / torch.sum(input_abs) * input_abs.numel() + if self.num_point == 25: # Kinect V2 + gamma = (1. - self.keep_prob) / (1 + 1.92) + elif self.num_point == 20: # Kinect V1 + gamma = (1. - self.keep_prob) / (1 + 1.9) + else: + gamma = (1. - self.keep_prob) / (1 + 1.92) + warnings.warn('undefined skeleton graph') + M_seed = torch.bernoulli(torch.clamp( + input_abs * gamma, max=1.0)).to(device=input.device, dtype=input.dtype) + M = torch.matmul(M_seed, A) + M[M > 0.001] = 1.0 + M[M < 0.5] = 0.0 + mask = (1 - M).view(n, 1, 1, self.num_point) + return input * mask * mask.numel() / mask.sum() diff --git a/model/dropT.py b/model/dropT.py new file mode 100644 index 0000000..6ed1167 --- /dev/null +++ b/model/dropT.py @@ -0,0 +1,24 @@ +import torch +import torch.nn.functional as F +from torch import nn + +class DropBlockT_1d(nn.Module): + def __init__(self, block_size=7): + super(DropBlockT_1d, self).__init__() + self.keep_prob = 0.0 + self.block_size = block_size + + def forward(self, input, keep_prob): + self.keep_prob = keep_prob + if not self.training or self.keep_prob == 1: + return input + n,c,t,v = input.size() + + input_abs = torch.mean(torch.mean(torch.abs(input),dim=3),dim=1).detach() + input_abs = (input_abs/torch.sum(input_abs)*input_abs.numel()).view(n,1,t) + gamma = (1. 
- self.keep_prob) / self.block_size + input1 = input.permute(0,1,3,2).contiguous().view(n,c*v,t) + M = torch.bernoulli(torch.clamp(input_abs * gamma, max=1.0)).repeat(1,c*v,1) + Msum = F.max_pool1d(M, kernel_size=[self.block_size], stride=1, padding=self.block_size // 2) + mask = (1 - Msum).to(device=input.device, dtype=input.dtype) + return (input1 * mask * mask.numel() /mask.sum()).view(n,c,v,t).permute(0,1,3,2) diff --git a/points_51.csv b/points_51.csv new file mode 100644 index 0000000..1cd9e02 --- /dev/null +++ b/points_51.csv @@ -0,0 +1,52 @@ +tar_name,ori_name,mp_pos,wb_pos,op_pos,origin,tarjet +pose_nose,pose_nose,1,1,1,1,1 +pose_left_eye,pose_nose,3,2,17,1,2 +pose_right_eye,pose_nose,6,3,16,1,3 +pose_left_shoulder,pose_nose,12,6,6,1,4 +pose_right_shoulder,pose_nose,13,7,3,1,5 +pose_left_elbow,pose_left_shoulder,14,8,7,4,6 +pose_right_elbow,pose_right_shoulder,15,9,4,5,7 +pose_left_wrist,pose_left_elbow,16,10,8,6,8 +pose_right_wrist,pose_right_elbow,17,11,5,7,9 +face_right_mouth_up,pose_nose,71,74,76,1,10 +face_right_eyebrow_inner,pose_nose,89,45,47,1,11 +face_right_mouth_corner,face_right_mouth_up,91,72,74,10,12 +face_right_eyebrow_outer,face_right_eyebrow_middle,104,41,43,15,13 +face_right_mouth_down,face_right_mouth_corner,118,80,82,12,14 +face_right_eyebrow_middle,face_right_eyebrow_inner,139,43,45,11,15 +face_left_mouth_up,pose_nose,301,76,78,1,16 +face_left_eyebrow_inner,pose_nose,319,46,48,1,17 +face_left_mouth_corner,face_left_mouth_up,321,78,80,16,18 +face_left_eyebrow_outer,face_left_eyebrow_middle,334,50,52,21,19 +face_left_mouth_down,face_left_mouth_corner,348,82,84,18,20 +face_left_eyebrow_middle,face_left_eyebrow_inner,368,48,50,17,21 +leftHand_thumb_cmc,pose_left_wrist,503,93,97,8,22 +leftHand_thumb_mcp,leftHand_thumb_cmc,504,94,98,22,23 +leftHand_thumb_tip,leftHand_thumb_ip,506,96,100,23,24 +leftHand_index_finger_mcp,pose_left_wrist,507,97,101,8,25 +leftHand_index_finger_pip,leftHand_index_finger_mcp,508,98,102,25,26 +leftHand_index_finger_tip,leftHand_index_finger_dip,510,100,104,26,27 +leftHand_middle_finger_mcp,pose_left_wrist,511,101,105,8,28 +leftHand_middle_finger_pip,leftHand_middle_finger_mcp,512,102,106,28,29 +leftHand_middle_finger_tip,leftHand_middle_finger_dip,514,104,108,29,30 +leftHand_ring_finger_mcp,pose_left_wrist,515,105,109,8,31 +leftHand_ring_finger_pip,leftHand_ring_finger_mcp,516,106,110,31,32 +leftHand_ring_finger_tip,leftHand_ring_finger_dip,518,108,112,32,33 +leftHand_pinky_mcp,pose_left_wrist,519,109,113,8,34 +leftHand_pinky_pip,leftHand_pinky_mcp,520,110,114,34,35 +leftHand_pinky_tip,leftHand_pinky_dip,522,112,116,35,36 +rightHand_thumb_cmc,pose_right_wrist,524,114,118,9,37 +rightHand_thumb_mcp,rightHand_thumb_cmc,525,115,119,37,38 +rightHand_thumb_tip,rightHand_thumb_ip,527,117,121,38,39 +rightHand_index_finger_mcp,pose_right_wrist,528,118,122,9,40 +rightHand_index_finger_pip,rightHand_index_finger_mcp,529,119,123,40,41 +rightHand_index_finger_tip,rightHand_index_finger_dip,531,121,125,41,42 +rightHand_middle_finger_mcp,pose_right_wrist,532,122,126,9,43 +rightHand_middle_finger_pip,rightHand_middle_finger_mcp,533,123,127,43,44 +rightHand_middle_finger_tip,rightHand_middle_finger_dip,535,125,129,44,45 +rightHand_ring_finger_mcp,pose_right_wrist,536,126,130,9,46 +rightHand_ring_finger_pip,rightHand_ring_finger_mcp,537,127,131,46,47 +rightHand_ring_finger_tip,rightHand_ring_finger_dip,539,129,133,47,48 +rightHand_pinky_mcp,pose_right_wrist,540,130,134,9,49 +rightHand_pinky_pip,rightHand_pinky_mcp,541,131,135,49,50 
+rightHand_pinky_tip,rightHand_pinky_dip,543,133,137,50,51 diff --git a/runModel.sh b/runModel.sh new file mode 100644 index 0000000..967501d --- /dev/null +++ b/runModel.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +#python main.py --config config/sign/train/train_joint.yaml + #num_class: 28 # AEC=28, PUCP=29 , WLASL=86 + #num_point: 29 # 29 or 71 + +declare -a points=(29 71 29 71 29 71) +declare -a lrs=(0.05 0.1 0.05 0.1 0.1 0.05) +declare -a datasets=("AEC" "AEC" "PUCP" "PUCP" "WLASL" "WLASL") + +for i in 1 +do + for j in 0 1 2 3 4 5 + do + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train numero_parametros + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train numero_parametros --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train numero_parametros --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + done +done + +""" + +for i in 5 15 25 35 45 55 65 75 85 95 +do + for j in 0 1 2 3 4 5 + do + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion_3 + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion_3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion_3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + done +done +""" diff --git a/runModelTest.sh b/runModelTest.sh new file mode 100644 index 0000000..d0558f6 --- /dev/null +++ b/runModelTest.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +######################################################### +#python main.py --config config/sign/train/train_joint.yaml + #num_class: 28 # AEC=28, PUCP=29 , WLASL=86 + #num_point: 29 # 29 or 71 or 51 + +declare -a points=(51 51 51) +declare -a lrs=(0.05 0.05 0.05) +declare -a datasets=("PUCP" "AEC" "WLASL") + + + +for i in 1 +do + for j in 0 1 2 + do + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 3 --mode_train 
cris_51points_v1 + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 3 --mode_train cris_51points_v1 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 3 --mode_train cris_51points_v1 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + done +done + From fec729a52f42fdbee2c78d74a0494adbc7874f4d Mon Sep 17 00:00:00 2001 From: Chameleon Cloud User Date: Fri, 28 Oct 2022 00:30:44 +0000 Subject: [PATCH 35/56] 51 puntos funcionando --- SL-GCN/config/sign/train/train_joint.yaml | 4 +- SL-GCN/points_51.csv | 70 +- SL-GCN/runModelTest.sh | 24 +- config/sign/finetune/train_bone.yaml | 50 - config/sign/finetune/train_bone_motion.yaml | 50 - config/sign/finetune/train_joint.yaml | 50 - config/sign/finetune/train_joint_motion.yaml | 49 - config/sign/test/test_bone.yaml | 52 - config/sign/test/test_bone_motion.yaml | 52 - config/sign/test/test_joint.yaml | 51 - config/sign/test/test_joint_motion.yaml | 51 - config/sign/test_finetuned/test_bone.yaml | 52 - .../sign/test_finetuned/test_bone_motion.yaml | 52 - config/sign/test_finetuned/test_joint.yaml | 51 - .../test_finetuned/test_joint_motion.yaml | 51 - config/sign/train/train_bone.yaml | 49 - config/sign/train/train_bone_motion.yaml | 49 - config/sign/train/train_joint.yaml | 54 - config/sign/train/train_joint_motion.yaml | 49 - data_gen/__init__.py | 0 data_gen/gen_bone_data.py | 74 -- data_gen/gen_motion_data.py | 33 - data_gen/getConnectingPoint.py | 147 --- data_gen/sign_gendata.py | 98 -- feeders/__init__.py | 2 - feeders/feeder.py | 249 ---- feeders/tools.py | 161 --- graph/__init__.py | 2 - graph/sign_27.py | 80 -- graph/tools.py | 27 - main.py | 1053 ----------------- model/__init__.py | 3 - model/decouple_gcn_attn.py | 284 ----- model/dropSke.py | 36 - model/dropT.py | 24 - points_51.csv | 52 - runModel.sh | 32 - runModelTest.sh | 23 - 38 files changed, 50 insertions(+), 3240 deletions(-) delete mode 100644 config/sign/finetune/train_bone.yaml delete mode 100644 config/sign/finetune/train_bone_motion.yaml delete mode 100644 config/sign/finetune/train_joint.yaml delete mode 100644 config/sign/finetune/train_joint_motion.yaml delete mode 100644 config/sign/test/test_bone.yaml delete mode 100644 config/sign/test/test_bone_motion.yaml delete mode 100644 config/sign/test/test_joint.yaml delete mode 100644 config/sign/test/test_joint_motion.yaml delete mode 100644 config/sign/test_finetuned/test_bone.yaml delete mode 100644 config/sign/test_finetuned/test_bone_motion.yaml delete mode 100644 config/sign/test_finetuned/test_joint.yaml delete mode 100644 config/sign/test_finetuned/test_joint_motion.yaml delete mode 100644 config/sign/train/train_bone.yaml delete mode 100644 config/sign/train/train_bone_motion.yaml delete mode 100644 config/sign/train/train_joint.yaml delete mode 100644 config/sign/train/train_joint_motion.yaml delete mode 100644 data_gen/__init__.py delete mode 100644 data_gen/gen_bone_data.py delete mode 100644 data_gen/gen_motion_data.py delete mode 100644 
data_gen/getConnectingPoint.py delete mode 100644 data_gen/sign_gendata.py delete mode 100644 feeders/__init__.py delete mode 100644 feeders/feeder.py delete mode 100644 feeders/tools.py delete mode 100644 graph/__init__.py delete mode 100644 graph/sign_27.py delete mode 100644 graph/tools.py delete mode 100644 main.py delete mode 100644 model/__init__.py delete mode 100644 model/decouple_gcn_attn.py delete mode 100644 model/dropSke.py delete mode 100644 model/dropT.py delete mode 100644 points_51.csv delete mode 100644 runModel.sh delete mode 100644 runModelTest.sh diff --git a/SL-GCN/config/sign/train/train_joint.yaml b/SL-GCN/config/sign/train/train_joint.yaml index 6049fe1..b7952fb 100644 --- a/SL-GCN/config/sign/train/train_joint.yaml +++ b/SL-GCN/config/sign/train/train_joint.yaml @@ -44,10 +44,10 @@ step: [150, 200] # training #device: [0, 1,2,3 ] -device: [0] +device: [0,1,2,3] keep_rate: 0.9 only_train_epoch: 1 -batch_size: 8 +batch_size: 64 test_batch_size: 8 num_epoch: 250 nesterov: True diff --git a/SL-GCN/points_51.csv b/SL-GCN/points_51.csv index dbecc24..1cd9e02 100644 --- a/SL-GCN/points_51.csv +++ b/SL-GCN/points_51.csv @@ -14,39 +14,39 @@ face_right_mouth_corner,face_right_mouth_up,91,72,74,10,12 face_right_eyebrow_outer,face_right_eyebrow_middle,104,41,43,15,13 face_right_mouth_down,face_right_mouth_corner,118,80,82,12,14 face_right_eyebrow_middle,face_right_eyebrow_inner,139,43,45,11,15 -face_right_eye_outer,face_right_eyebrow_outer,164,60,62,13,16 -face_right_jaw_up,face_right_jaw_middle,166,27,29,20,17 -face_right_eye_inner,face_right_eye_outer,167,63,65,16,18 -face_right_jaw_down,pose_nose,182,31,33,1,19 -face_right_jaw_middle,face_right_jaw_down,206,29,31,19,20 -face_left_mouth_up,pose_nose,301,76,78,1,21 -face_left_eyebrow_inner,pose_nose,319,50,48,26,22 -face_left_mouth_corner,face_left_mouth_up,321,78,80,21,23 -face_left_eyebrow_outer,face_left_eyebrow_middle,334,46,52,1,24 -face_left_mouth_down,face_left_mouth_corner,348,82,84,23,25 -face_left_eyebrow_middle,face_left_eyebrow_inner,368,48,50,24,26 -face_left_eye_outer,face_left_eyebrow_outer,393,69,71,22,27 -face_left_jaw_up,face_left_jaw_middle,395,37,39,31,28 -face_left_eye_inner,face_left_eye_outer,396,66,68,27,29 -face_left_jaw_down,pose_nose,411,33,35,1,30 -face_left_jaw_middle,face_left_jaw_down,431,35,37,30,31 -leftHand_thumb_cmc,pose_left_wrist,503,93,97,8,32 -leftHand_thumb_tip,leftHand_thumb_ip,506,96,100,32,33 -leftHand_index_finger_mcp,pose_left_wrist,507,97,101,8,34 -leftHand_index_finger_tip,leftHand_index_finger_dip,510,100,104,34,35 -leftHand_middle_finger_mcp,pose_left_wrist,511,101,105,8,36 -leftHand_middle_finger_tip,leftHand_middle_finger_dip,514,104,108,36,37 -leftHand_ring_finger_mcp,pose_left_wrist,515,105,109,8,38 -leftHand_ring_finger_tip,leftHand_ring_finger_dip,518,108,112,38,39 -leftHand_pinky_mcp,pose_left_wrist,519,109,113,8,40 -leftHand_pinky_tip,leftHand_pinky_dip,522,112,116,40,41 -rightHand_thumb_cmc,pose_right_wrist,524,114,118,9,42 -rightHand_thumb_tip,rightHand_thumb_ip,527,117,121,42,43 -rightHand_index_finger_mcp,pose_right_wrist,528,118,122,9,44 -rightHand_index_finger_tip,rightHand_index_finger_dip,531,121,125,44,45 -rightHand_middle_finger_mcp,pose_right_wrist,532,122,126,9,46 -rightHand_middle_finger_tip,rightHand_middle_finger_dip,535,125,129,46,47 -rightHand_ring_finger_mcp,pose_right_wrist,536,126,130,9,48 -rightHand_ring_finger_tip,rightHand_ring_finger_dip,539,129,133,48,49 -rightHand_pinky_mcp,pose_right_wrist,540,130,134,9,50 
+face_left_mouth_up,pose_nose,301,76,78,1,16 +face_left_eyebrow_inner,pose_nose,319,46,48,1,17 +face_left_mouth_corner,face_left_mouth_up,321,78,80,16,18 +face_left_eyebrow_outer,face_left_eyebrow_middle,334,50,52,21,19 +face_left_mouth_down,face_left_mouth_corner,348,82,84,18,20 +face_left_eyebrow_middle,face_left_eyebrow_inner,368,48,50,17,21 +leftHand_thumb_cmc,pose_left_wrist,503,93,97,8,22 +leftHand_thumb_mcp,leftHand_thumb_cmc,504,94,98,22,23 +leftHand_thumb_tip,leftHand_thumb_ip,506,96,100,23,24 +leftHand_index_finger_mcp,pose_left_wrist,507,97,101,8,25 +leftHand_index_finger_pip,leftHand_index_finger_mcp,508,98,102,25,26 +leftHand_index_finger_tip,leftHand_index_finger_dip,510,100,104,26,27 +leftHand_middle_finger_mcp,pose_left_wrist,511,101,105,8,28 +leftHand_middle_finger_pip,leftHand_middle_finger_mcp,512,102,106,28,29 +leftHand_middle_finger_tip,leftHand_middle_finger_dip,514,104,108,29,30 +leftHand_ring_finger_mcp,pose_left_wrist,515,105,109,8,31 +leftHand_ring_finger_pip,leftHand_ring_finger_mcp,516,106,110,31,32 +leftHand_ring_finger_tip,leftHand_ring_finger_dip,518,108,112,32,33 +leftHand_pinky_mcp,pose_left_wrist,519,109,113,8,34 +leftHand_pinky_pip,leftHand_pinky_mcp,520,110,114,34,35 +leftHand_pinky_tip,leftHand_pinky_dip,522,112,116,35,36 +rightHand_thumb_cmc,pose_right_wrist,524,114,118,9,37 +rightHand_thumb_mcp,rightHand_thumb_cmc,525,115,119,37,38 +rightHand_thumb_tip,rightHand_thumb_ip,527,117,121,38,39 +rightHand_index_finger_mcp,pose_right_wrist,528,118,122,9,40 +rightHand_index_finger_pip,rightHand_index_finger_mcp,529,119,123,40,41 +rightHand_index_finger_tip,rightHand_index_finger_dip,531,121,125,41,42 +rightHand_middle_finger_mcp,pose_right_wrist,532,122,126,9,43 +rightHand_middle_finger_pip,rightHand_middle_finger_mcp,533,123,127,43,44 +rightHand_middle_finger_tip,rightHand_middle_finger_dip,535,125,129,44,45 +rightHand_ring_finger_mcp,pose_right_wrist,536,126,130,9,46 +rightHand_ring_finger_pip,rightHand_ring_finger_mcp,537,127,131,46,47 +rightHand_ring_finger_tip,rightHand_ring_finger_dip,539,129,133,47,48 +rightHand_pinky_mcp,pose_right_wrist,540,130,134,9,49 +rightHand_pinky_pip,rightHand_pinky_mcp,541,131,135,49,50 rightHand_pinky_tip,rightHand_pinky_dip,543,133,137,50,51 diff --git a/SL-GCN/runModelTest.sh b/SL-GCN/runModelTest.sh index ddef9e5..c9df403 100644 --- a/SL-GCN/runModelTest.sh +++ b/SL-GCN/runModelTest.sh @@ -1,21 +1,23 @@ #!/bin/bash +######################################################### #python main.py --config config/sign/train/train_joint.yaml #num_class: 28 # AEC=28, PUCP=29 , WLASL=86 - #num_point: 29 # 29 or 71 + #num_point: 29 # 29 or 71 or 51 -declare -a points=(51 29 71 29 71 29 71) -declare -a lrs=(0.05 0.05 0.1 0.05 0.1 0.1 0.05) -declare -a datasets=("PUCP" "PUCP" "PUCP" "AEC" "AEC" "WLASL" "WLASL") +declare -a points=(51 51 51) +declare -a lrs=(0.05 0.05 0.05) +declare -a datasets=("PUCP" "AEC" "WLASL") -# for j in 0 1 2 3 4 5 -for i in 1 + +for i in 0 5 15 25 35 do - for j in 0 + for j in 0 1 2 do - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train cris_40points_1 - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train cris_40points_1 --weights 
"save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train cris_40points_1 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train neurips_51points_v4 + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train neurips_51points_v4 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train neurips_51points_v4 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" done -done \ No newline at end of file +done + diff --git a/config/sign/finetune/train_bone.yaml b/config/sign/finetune/train_bone.yaml deleted file mode 100644 index ed0da3a..0000000 --- a/config/sign/finetune/train_bone.yaml +++ /dev/null @@ -1,50 +0,0 @@ -Experiment_name: bone_27_2_finetune - -# feeder -feeder: feeders.feeder.Feeder -train_feeder_args: - data_path: ./data/sign/27_2/train_val_data_bone2.npy - label_path: ./data/sign/27_2/train_val_labels.pkl - debug: False - random_choose: True - random_shift: True - window_size: 100 - normalization: True - random_mirror: True - random_mirror_p: 0.5 - is_vector: True - -test_feeder_args: - data_path: ./data/sign/27_2/test_data_bone.npy - label_path: ./data/sign/27_2/test_labels_pseudo.pkl - random_mirror: False - normalization: True - is_vector: True - -# model -model: model.decouple_gcn_attn.Model -model_args: - num_class: 226 - num_point: 27 - num_person: 1 - graph: graph.sign_27.Graph - groups: 16 - block_size: 41 - graph_args: - labeling_mode: 'spatial' - -#optim -weight_decay: 0.0001 -base_lr: 0.01 -step: [50] - -# training -device: [0,1] -weights: final_models/27_2/bone_epoch_239_9470.pt -keep_rate: 0.9 -only_train_epoch: 1 -batch_size: 64 -test_batch_size: 64 -num_epoch: 100 -nesterov: True -warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/finetune/train_bone_motion.yaml b/config/sign/finetune/train_bone_motion.yaml deleted file mode 100644 index 9aba2df..0000000 --- a/config/sign/finetune/train_bone_motion.yaml +++ /dev/null @@ -1,50 +0,0 @@ -Experiment_name: bone_motion_27_2_finetune - -# feeder -feeder: feeders.feeder.Feeder -train_feeder_args: - data_path: ./data/sign/27_2/train_val_data_bone2_motion.npy - label_path: ./data/sign/27_2/train_val_labels.pkl - debug: False - random_choose: True - random_shift: True - window_size: 100 - random_mirror: True - random_mirror_p: 0.5 - normalization: True - is_vector: True - 
-test_feeder_args: - data_path: ./data/sign/27_2/test_data_bone_motion.npy - label_path: ./data/sign/27_2/test_labels_pseudo.pkl - random_mirror: False - normalization: True - is_vector: True - -# model -model: model.decouple_gcn_attn.Model -model_args: - num_class: 226 - num_point: 27 - num_person: 1 - graph: graph.sign_27.Graph - groups: 16 - block_size: 41 - graph_args: - labeling_mode: 'spatial' - -#optim -weight_decay: 0.0001 -base_lr: 0.01 -step: [50] - -# training -device: [0,1] -weights: final_models/27_2/bone_motion_217_9249.pt -keep_rate: 0.9 -only_train_epoch: 1 -batch_size: 64 -test_batch_size: 64 -num_epoch: 100 -nesterov: True -warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/finetune/train_joint.yaml b/config/sign/finetune/train_joint.yaml deleted file mode 100644 index 0b3f480..0000000 --- a/config/sign/finetune/train_joint.yaml +++ /dev/null @@ -1,50 +0,0 @@ -Experiment_name: joint_27_2_finetune - -# feeder -feeder: feeders.feeder.Feeder -train_feeder_args: - data_path: ./data/sign/27_2/train_val_data_joint.npy - label_path: ./data/sign/27_2/train_val_labels.pkl - debug: False - random_choose: True - window_size: 100 - random_shift: True - normalization: True - random_mirror: True - random_mirror_p: 0.5 - is_vector: False - -test_feeder_args: - data_path: ./data/sign/27_2/test_data_joint.npy - label_path: ./data/sign/27_2/test_labels_pseudo.pkl - random_mirror: False - normalization: True - -# model -model: model.decouple_gcn_attn.Model -model_args: - num_class: 226 - num_point: 27 - num_person: 1 - graph: graph.sign_27.Graph - groups: 16 - block_size: 41 - graph_args: - labeling_mode: 'spatial' - -#optim -weight_decay: 0.0001 -base_lr: 0.01 -step: [50] - -# training -device: [0,1] -weights: final_models/27_2/joint_epoch_226_9468.pt -# start_epoch: 188 -keep_rate: 0.9 -only_train_epoch: 1 -batch_size: 64 -test_batch_size: 64 -num_epoch: 100 -nesterov: True -warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/finetune/train_joint_motion.yaml b/config/sign/finetune/train_joint_motion.yaml deleted file mode 100644 index 86382f2..0000000 --- a/config/sign/finetune/train_joint_motion.yaml +++ /dev/null @@ -1,49 +0,0 @@ -Experiment_name: joint_motion_27_2_finetune -# feeder -feeder: feeders.feeder.Feeder -train_feeder_args: - data_path: ./data/sign/27_2/train_val_data_joint_motion.npy - label_path: ./data/sign/27_2/train_val_labels.pkl - debug: False - random_choose: True - random_shift: True - window_size: 100 - random_mirror: True - random_mirror_p: 0.5 - normalization: True - is_vector: True - -test_feeder_args: - data_path: ./data/sign/27_2/test_data_joint_motion.npy - label_path: ./data/sign/27_2/test_labels_pseudo.pkl - random_mirror: False - normalization: True - is_vector: True - -# model -model: model.decouple_gcn_attn.Model -model_args: - num_class: 226 - num_point: 27 - num_person: 1 - graph: graph.sign_27.Graph - groups: 16 - block_size: 41 - graph_args: - labeling_mode: 'spatial' - -#optim -weight_decay: 0.0001 -base_lr: 0.01 -step: [50] - -# training -device: [0,1] -weights: final_models/27_2/joint_motion_248_9301.pt -keep_rate: 0.9 -only_train_epoch: 1 -batch_size: 64 -test_batch_size: 64 -num_epoch: 100 -nesterov: True -warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/test/test_bone.yaml b/config/sign/test/test_bone.yaml deleted file mode 100644 index f97983a..0000000 --- a/config/sign/test/test_bone.yaml +++ /dev/null @@ -1,52 +0,0 @@ -Experiment_name: bone_27_2_test - -# feeder -feeder: 
feeders.feeder.Feeder -train_feeder_args: - data_path: ./data/sign/27_2/train_data_bone.npy - label_path: ./data/sign/27_2/train_labels.pkl - debug: False - random_choose: True - random_shift: True - window_size: 100 - normalization: True - random_mirror: True - random_mirror_p: 0.5 - is_vector: True - -test_feeder_args: - data_path: ./data/sign/27_2/test_data_bone.npy - label_path: ./data/sign/27_2/test_labels_pseudo.pkl - random_mirror: False - normalization: True - is_vector: True - debug: False - -# model -model: model.decouple_gcn_attn.Model -model_args: - num_class: 226 - num_point: 27 - num_person: 1 - graph: graph.sign_27.Graph - groups: 16 - block_size: 41 - graph_args: - labeling_mode: 'spatial' - -#optim -weight_decay: 0.0001 -base_lr: 0.01 -step: [50] - -# training -device: [0,1] -phase: test -weights: final_models/27_2/bone_epoch_239_9470.pt -keep_rate: 0.9 -only_train_epoch: 1 -batch_size: 64 -test_batch_size: 64 -num_epoch: 100 -nesterov: True -warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/test/test_bone_motion.yaml b/config/sign/test/test_bone_motion.yaml deleted file mode 100644 index 4a34f20..0000000 --- a/config/sign/test/test_bone_motion.yaml +++ /dev/null @@ -1,52 +0,0 @@ -Experiment_name: bone_motion_27_2_test - -# feeder -feeder: feeders.feeder.Feeder -train_feeder_args: - data_path: ./data/sign/27_2/train_data_bone_motion.npy - label_path: ./data/sign/27_2/train_labels.pkl - debug: False - random_choose: True - random_shift: True - window_size: 100 - random_mirror: True - random_mirror_p: 0.5 - normalization: True - is_vector: True - -test_feeder_args: - data_path: ./data/sign/27_2/test_data_bone_motion.npy - label_path: ./data/sign/27_2/test_labels_pseudo.pkl - random_mirror: False - normalization: True - is_vector: True - debug: False - -# model -model: model.decouple_gcn_attn.Model -model_args: - num_class: 226 - num_point: 27 - num_person: 1 - graph: graph.sign_27.Graph - groups: 16 - block_size: 41 - graph_args: - labeling_mode: 'spatial' - -#optim -weight_decay: 0.0001 -base_lr: 0.01 -step: [50] - -# training -device: [0,1] -phase: test -weights: final_models/27_2/bone_motion_217_9249.pt -keep_rate: 0.9 -only_train_epoch: 1 -batch_size: 64 -test_batch_size: 64 -num_epoch: 100 -nesterov: True -warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/test/test_joint.yaml b/config/sign/test/test_joint.yaml deleted file mode 100644 index 7a147bd..0000000 --- a/config/sign/test/test_joint.yaml +++ /dev/null @@ -1,51 +0,0 @@ -Experiment_name: joint_27_2_test - -# feeder -feeder: feeders.feeder.Feeder -train_feeder_args: - data_path: ./data/sign/27_2/train_data_joint.npy - label_path: ./data/sign/27_2/train_labels.pkl - debug: False - random_choose: True - window_size: 100 - random_shift: True - normalization: True - random_mirror: True - random_mirror_p: 0.5 - is_vector: False - -test_feeder_args: - data_path: ./data/sign/27_2/test_data_joint.npy - label_path: ./data/sign/27_2/test_labels_pseudo.pkl - random_mirror: False - normalization: True - debug: False - -# model -model: model.decouple_gcn_attn.Model -model_args: - num_class: 226 - num_point: 27 - num_person: 1 - graph: graph.sign_27.Graph - groups: 16 - block_size: 41 - graph_args: - labeling_mode: 'spatial' - -#optim -weight_decay: 0.0001 -base_lr: 0.01 -step: [50] - -# training -device: [0,1] -phase: test -weights: final_models/27_2/joint_epoch_226_9468.pt -keep_rate: 0.9 -only_train_epoch: 1 -batch_size: 64 -test_batch_size: 64 -num_epoch: 100 -nesterov: True 
-warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/test/test_joint_motion.yaml b/config/sign/test/test_joint_motion.yaml deleted file mode 100644 index 8419ddd..0000000 --- a/config/sign/test/test_joint_motion.yaml +++ /dev/null @@ -1,51 +0,0 @@ -Experiment_name: joint_motion_27_2_test -# feeder -feeder: feeders.feeder.Feeder -train_feeder_args: - data_path: ./data/sign/27_2/train_data_joint_motion.npy - label_path: ./data/sign/27_2/train_labels.pkl - debug: False - random_choose: True - random_shift: True - window_size: 100 - random_mirror: True - random_mirror_p: 0.5 - normalization: True - is_vector: True - -test_feeder_args: - data_path: ./data/sign/27_2/test_data_joint_motion.npy - label_path: ./data/sign/27_2/test_labels_pseudo.pkl - random_mirror: False - normalization: True - is_vector: True - debug: False - -# model -model: model.decouple_gcn_attn.Model -model_args: - num_class: 226 - num_point: 27 - num_person: 1 - graph: graph.sign_27.Graph - groups: 16 - block_size: 41 - graph_args: - labeling_mode: 'spatial' - -#optim -weight_decay: 0.0001 -base_lr: 0.01 -step: [50] - -# training -device: [0,1] -phase: test -weights: final_models/27_2/joint_motion_248_9301.pt -keep_rate: 0.9 -only_train_epoch: 1 -batch_size: 64 -test_batch_size: 64 -num_epoch: 100 -nesterov: True -warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/test_finetuned/test_bone.yaml b/config/sign/test_finetuned/test_bone.yaml deleted file mode 100644 index 9561ac8..0000000 --- a/config/sign/test_finetuned/test_bone.yaml +++ /dev/null @@ -1,52 +0,0 @@ -Experiment_name: bone_27_2_finetune_test - -# feeder -feeder: feeders.feeder.Feeder -train_feeder_args: - data_path: ./data/sign/27_2/train_val_data_bone.npy - label_path: ./data/sign/27_2/train_val_labels.pkl - debug: False - random_choose: True - random_shift: True - window_size: 100 - normalization: True - random_mirror: True - random_mirror_p: 0.5 - is_vector: True - -test_feeder_args: - data_path: ./data/sign/27_2/test_data_bone.npy - label_path: ./data/sign/27_2/test_labels_pseudo.pkl - random_mirror: False - normalization: True - is_vector: True - debug: False - -# model -model: model.decouple_gcn_attn.Model -model_args: - num_class: 226 - num_point: 27 - num_person: 1 - graph: graph.sign_27.Graph - groups: 16 - block_size: 41 - graph_args: - labeling_mode: 'spatial' - -#optim -weight_decay: 0.0001 -base_lr: 0.01 -step: [50] - -# training -device: [0,1] -phase: test -weights: final_models/27_2_finetuned/bone_finetuned.pt -keep_rate: 0.9 -only_train_epoch: 1 -batch_size: 64 -test_batch_size: 64 -num_epoch: 100 -nesterov: True -warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/test_finetuned/test_bone_motion.yaml b/config/sign/test_finetuned/test_bone_motion.yaml deleted file mode 100644 index 5ed26ad..0000000 --- a/config/sign/test_finetuned/test_bone_motion.yaml +++ /dev/null @@ -1,52 +0,0 @@ -Experiment_name: bone_motion_27_2_finetune_test - -# feeder -feeder: feeders.feeder.Feeder -train_feeder_args: - data_path: ./data/sign/27_2/train_val_data_bone_motion.npy - label_path: ./data/sign/27_2/train_val_labels.pkl - debug: False - random_choose: True - random_shift: True - window_size: 100 - random_mirror: True - random_mirror_p: 0.5 - normalization: True - is_vector: True - -test_feeder_args: - data_path: ./data/sign/27_2/test_data_bone_motion.npy - label_path: ./data/sign/27_2/test_labels_pseudo.pkl - random_mirror: False - normalization: True - is_vector: True - debug: False - -# model -model: 
model.decouple_gcn_attn.Model -model_args: - num_class: 226 - num_point: 27 - num_person: 1 - graph: graph.sign_27.Graph - groups: 16 - block_size: 41 - graph_args: - labeling_mode: 'spatial' - -#optim -weight_decay: 0.0001 -base_lr: 0.01 -step: [50] - -# training -device: [0,1] -phase: test -weights: final_models/27_2_finetuned/bone_motion_finetuned.pt -keep_rate: 0.9 -only_train_epoch: 1 -batch_size: 64 -test_batch_size: 64 -num_epoch: 100 -nesterov: True -warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/test_finetuned/test_joint.yaml b/config/sign/test_finetuned/test_joint.yaml deleted file mode 100644 index b9e9afe..0000000 --- a/config/sign/test_finetuned/test_joint.yaml +++ /dev/null @@ -1,51 +0,0 @@ -Experiment_name: joint_27_2_finetune_test - -# feeder -feeder: feeders.feeder.Feeder -train_feeder_args: - data_path: ./data/sign/27_2/train_val_data_joint.npy - label_path: ./data/sign/27_2/train_val_labels.pkl - debug: False - random_choose: True - window_size: 100 - random_shift: True - normalization: True - random_mirror: True - random_mirror_p: 0.5 - is_vector: False - -test_feeder_args: - data_path: ./data/sign/27_2/test_data_joint.npy - label_path: ./data/sign/27_2/test_labels_pseudo.pkl - random_mirror: False - normalization: True - debug: False - -# model -model: model.decouple_gcn_attn.Model -model_args: - num_class: 226 - num_point: 27 - num_person: 1 - graph: graph.sign_27.Graph - groups: 16 - block_size: 41 - graph_args: - labeling_mode: 'spatial' - -#optim -weight_decay: 0.0001 -base_lr: 0.01 -step: [50] - -# training -device: [0,1] -phase: test -weights: final_models/27_2_finetuned/joint_finetuned.pt -keep_rate: 0.9 -only_train_epoch: 1 -batch_size: 64 -test_batch_size: 64 -num_epoch: 100 -nesterov: True -warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/test_finetuned/test_joint_motion.yaml b/config/sign/test_finetuned/test_joint_motion.yaml deleted file mode 100644 index 3ab3fec..0000000 --- a/config/sign/test_finetuned/test_joint_motion.yaml +++ /dev/null @@ -1,51 +0,0 @@ -Experiment_name: joint_motion_27_2_finetune_test -# feeder -feeder: feeders.feeder.Feeder -train_feeder_args: - data_path: ./data/sign/27_2/train_val_data_joint_motion.npy - label_path: ./data/sign/27_2/train_val_labels.pkl - debug: False - random_choose: True - random_shift: True - window_size: 100 - random_mirror: True - random_mirror_p: 0.5 - normalization: True - is_vector: True - -test_feeder_args: - data_path: ./data/sign/27_2/test_data_joint_motion.npy - label_path: ./data/sign/27_2/test_labels_pseudo.pkl - random_mirror: False - normalization: True - is_vector: True - debug: False - -# model -model: model.decouple_gcn_attn.Model -model_args: - num_class: 226 - num_point: 27 - num_person: 1 - graph: graph.sign_27.Graph - groups: 16 - block_size: 41 - graph_args: - labeling_mode: 'spatial' - -#optim -weight_decay: 0.0001 -base_lr: 0.01 -step: [50] - -# training -device: [0,1] -phase: test -weights: final_models/27_2_finetuned/joint_motion_finetuned.pt -keep_rate: 0.9 -only_train_epoch: 1 -batch_size: 64 -test_batch_size: 64 -num_epoch: 100 -nesterov: True -warm_up_epoch: 0 \ No newline at end of file diff --git a/config/sign/train/train_bone.yaml b/config/sign/train/train_bone.yaml deleted file mode 100644 index f1cff0d..0000000 --- a/config/sign/train/train_bone.yaml +++ /dev/null @@ -1,49 +0,0 @@ -Experiment_name: sign_bone_final - -# feeder -feeder: feeders.feeder.Feeder -train_feeder_args: - data_path: ./data/sign/27_2/train_data_bone.npy - 
label_path: ./data/sign/27_2/train_label.pkl - debug: False - random_choose: True - random_shift: True - window_size: 100 - normalization: True - random_mirror: True - random_mirror_p: 0.5 - is_vector: True - -test_feeder_args: - data_path: ./data/sign/27_2/val_data_bone.npy - label_path: ./data/sign/27_2/val_gt.pkl - random_mirror: False - normalization: True - is_vector: True - -# model -model: model.decouple_gcn_attn.Model -model_args: - num_class: 226 - num_point: 27 - num_person: 1 - graph: graph.sign_27.Graph - groups: 16 - block_size: 41 - graph_args: - labeling_mode: 'spatial' - -#optim -weight_decay: 0.0001 -base_lr: 0.1 -step: [150, 200] - -# training -device: [4,5,6,7] -keep_rate: 0.9 -only_train_epoch: 1 -batch_size: 64 -test_batch_size: 64 -num_epoch: 250 -nesterov: True -warm_up_epoch: 20 \ No newline at end of file diff --git a/config/sign/train/train_bone_motion.yaml b/config/sign/train/train_bone_motion.yaml deleted file mode 100644 index 83c912b..0000000 --- a/config/sign/train/train_bone_motion.yaml +++ /dev/null @@ -1,49 +0,0 @@ -Experiment_name: sign_bone_motion_final - -# feeder -feeder: feeders.feeder.Feeder -train_feeder_args: - data_path: ./data/sign/27_2/train_data_bone_motion.npy - label_path: ./data/sign/27_2/train_label.pkl - debug: False - random_choose: True - random_shift: True - window_size: 100 - random_mirror: True - random_mirror_p: 0.5 - normalization: True - is_vector: True - -test_feeder_args: - data_path: ./data/sign/27_2/val_data_bone_motion.npy - label_path: ./data/sign/27_2/val_gt.pkl - random_mirror: False - normalization: True - is_vector: True - -# model -model: model.decouple_gcn_attn.Model -model_args: - num_class: 226 - num_point: 27 - num_person: 1 - graph: graph.sign_27.Graph - groups: 16 - block_size: 41 - graph_args: - labeling_mode: 'spatial' - -#optim -weight_decay: 0.0001 -base_lr: 0.1 -step: [150, 200] - -# training -device: [4,5,6,7] -keep_rate: 0.9 -only_train_epoch: 1 -batch_size: 64 -test_batch_size: 64 -num_epoch: 250 -nesterov: True -warm_up_epoch: 20 \ No newline at end of file diff --git a/config/sign/train/train_joint.yaml b/config/sign/train/train_joint.yaml deleted file mode 100644 index 6049fe1..0000000 --- a/config/sign/train/train_joint.yaml +++ /dev/null @@ -1,54 +0,0 @@ -#Experiment_name: sign_joint_final - -# feeder -feeder: feeders.feeder.Feeder -train_feeder_args: - data_path: data/sign/1/train_data_joint.npy - label_path: data/sign/1/train_label.pkl - meaning_path: data/sign/1/meaning.pkl - debug: False - random_choose: True - window_size: 100 - random_shift: True - normalization: True - random_mirror: True - random_mirror_p: 0.5 - is_vector: False - -test_feeder_args: - data_path: data/sign/1/val_data_joint.npy - label_path: data/sign/1/val_label.pkl - meaning_path: data/sign/1/meaning.pkl - random_mirror: False - normalization: True - -# model -# 226 (num classes) -model: model.decouple_gcn_attn.Model -model_args: - #num_class: 28 # AEC=28, PUCP=36 , WLASL=101 - #num_point: 29 # 29 or 71 - num_person: 1 - graph: graph.sign_27.Graph - groups: 16 - block_size: 41 - graph_args: - labeling_mode: 'spatial' - num_node: 29 - -#optim -weight_decay: 0.0001 -base_lr: 0.1 -step: [150, 200] - -# training -#device: [0, 1,2,3 ] - -device: [0] -keep_rate: 0.9 -only_train_epoch: 1 -batch_size: 8 -test_batch_size: 8 -num_epoch: 250 -nesterov: True -warm_up_epoch: 20 diff --git a/config/sign/train/train_joint_motion.yaml b/config/sign/train/train_joint_motion.yaml deleted file mode 100644 index 5ef0f5a..0000000 --- 
a/config/sign/train/train_joint_motion.yaml +++ /dev/null @@ -1,49 +0,0 @@ -Experiment_name: sign_joint_motion_final - -# feeder -feeder: feeders.feeder.Feeder -train_feeder_args: - data_path: ./data/sign/27_2/train_data_joint_motion.npy - label_path: ./data/sign/27_2/train_label.pkl - debug: False - random_choose: True - random_shift: True - window_size: 100 - random_mirror: True - random_mirror_p: 0.5 - normalization: True - is_vector: True - -test_feeder_args: - data_path: ./data/sign/27_2/val_data_joint_motion.npy - label_path: ./data/sign/27_2/val_gt.pkl - random_mirror: False - normalization: True - is_vector: True - -# model -model: model.decouple_gcn_attn.Model -model_args: - num_class: 226 - num_point: 27 - num_person: 1 - graph: graph.sign_27.Graph - groups: 16 - block_size: 41 - graph_args: - labeling_mode: 'spatial' - -#optim -weight_decay: 0.0001 -base_lr: 0.1 -step: [150, 200] - -# training -device: [0,1,2,3] -keep_rate: 0.9 -only_train_epoch: 1 -batch_size: 64 -test_batch_size: 64 -num_epoch: 250 -nesterov: True -warm_up_epoch: 20 \ No newline at end of file diff --git a/data_gen/__init__.py b/data_gen/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/data_gen/gen_bone_data.py b/data_gen/gen_bone_data.py deleted file mode 100644 index dbe36cb..0000000 --- a/data_gen/gen_bone_data.py +++ /dev/null @@ -1,74 +0,0 @@ -import os -import numpy as np -from numpy.lib.format import open_memmap - -paris = { - 'ntu/xview': ( - (1, 2), (2, 21), (3, 21), (4, 3), (5, 21), (6, 5), (7, 6), (8, 7), (9, 21), (10, 9), (11, 10), (12, 11), - (13, 1), - (14, 13), (15, 14), (16, 15), (17, 1), (18, 17), (19, 18), (20, 19), (22, 23), (21, 21), (23, 8), (24, 25), - (25, 12) - ), - 'ntu/xsub': ( - (1, 2), (2, 21), (3, 21), (4, 3), (5, 21), (6, 5), (7, 6), (8, 7), (9, 21), (10, 9), (11, 10), (12, 11), - (13, 1), - (14, 13), (15, 14), (16, 15), (17, 1), (18, 17), (19, 18), (20, 19), (22, 23), (21, 21), (23, 8), (24, 25), - (25, 12) - ), - 'ntu120/xsetup': ( - (1, 2), (2, 21), (3, 21), (4, 3), (5, 21), (6, 5), (7, 6), (8, 7), (9, 21), (10, 9), (11, 10), (12, 11), - (13, 1), - (14, 13), (15, 14), (16, 15), (17, 1), (18, 17), (19, 18), (20, 19), (22, 23), (21, 21), (23, 8), (24, 25), - (25, 12) - ), - 'ntu120/xsub': ( - (1, 2), (2, 21), (3, 21), (4, 3), (5, 21), (6, 5), (7, 6), (8, 7), (9, 21), (10, 9), (11, 10), (12, 11), - (13, 1), - (14, 13), (15, 14), (16, 15), (17, 1), (18, 17), (19, 18), (20, 19), (22, 23), (21, 21), (23, 8), (24, 25), - (25, 12) - ), - 'sign/27': ((5, 6), (5, 7), - (6, 8), (8, 10), (7, 9), (9, 11), - (12,13),(12,14),(12,16),(12,18),(12,20), - (14,15),(16,17),(18,19),(20,21), - (22,23),(22,24),(22,26),(22,28),(22,30), - (24,25),(26,27),(28,29),(30,31), - (10,12),(11,22) - ), - 'sign/27_2': ((5, 6), (5, 7), - (6, 8), (8, 10), (7, 9), (9, 11), - (12,13),(12,14),(12,16),(12,18),(12,20), - (14,15),(16,17),(18,19),(20,21), - (22,23),(22,24),(22,26),(22,28),(22,30), - (24,25),(26,27),(28,29),(30,31), - (10,12),(11,22) - ) -} - -sets = { - 'train', 'val', 'test' -} - -datasets = { - 'sign/27_2' -} - -from tqdm import tqdm - -for dataset in datasets: - for set in sets: - print(dataset, set) - data = np.load('../data/{}/{}_data_joint.npy'.format(dataset, set)) - N, C, T, V, M = data.shape - fp_sp = open_memmap( - '../data/{}/{}_data_bone.npy'.format(dataset, set), - dtype='float32', - mode='w+', - shape=(N, 3, T, V, M)) - - fp_sp[:, :C, :, :, :] = data - for v1, v2 in tqdm(paris[dataset]): - v1 -= 5 - v2 -= 5 - fp_sp[:, :, :, v2, :] = data[:, :, :, v2, :] - data[:, :, 
:, v1, :] - # fp_sp[:, :, :, v1, :] = data[:, :, :, v1, :] - data[:, :, :, v2, :] diff --git a/data_gen/gen_motion_data.py b/data_gen/gen_motion_data.py deleted file mode 100644 index dc3109a..0000000 --- a/data_gen/gen_motion_data.py +++ /dev/null @@ -1,33 +0,0 @@ -import os -import numpy as np -from numpy.lib.format import open_memmap - -sets = { - 'train', 'val', 'test' - -} - -datasets = { - 'sign/27_2' -} - -parts = { - 'joint', 'bone' -} -from tqdm import tqdm - -for dataset in datasets: - for set in sets: - for part in parts: - print(dataset, set, part) - data = np.load('../data/{}/{}_data_{}.npy'.format(dataset, set, part)) - N, C, T, V, M = data.shape - print(data.shape) - fp_sp = open_memmap( - '../data/{}/{}_data_{}_motion.npy'.format(dataset, set, part), - dtype='float32', - mode='w+', - shape=(N, C, T, V, M)) - for t in tqdm(range(T - 1)): - fp_sp[:, :, t, :, :] = data[:, :, t + 1, :, :] - data[:, :, t, :, :] - fp_sp[:, :, T - 1, :, :] = 0 diff --git a/data_gen/getConnectingPoint.py b/data_gen/getConnectingPoint.py deleted file mode 100644 index cd37a7b..0000000 --- a/data_gen/getConnectingPoint.py +++ /dev/null @@ -1,147 +0,0 @@ -import pickle -import sys -import numpy as np -import pandas as pd -import os -import h5py -import pandas as pd -sys.path.extend(['../']) - -max_body_true = 1 -max_frame = 150 -num_channels = 2 - -# These three def return an index value less 1 because it array count starts at 1 -def get_mp_keys(points): - tar = np.array(points.mp_pos)-1 - return list(tar) - -def get_op_keys(points): - tar = np.array(points.op_pos)-1 - return list(tar) - -def get_wp_keys(points): - tar = np.array(points.wb_pos)-1 - return list(tar) - -def read_data(path, model_key_getter, config): - data = [] - classes = [] - videoName = [] - - if 'AEC' in path: - list_labels_banned = ["ya", "qué?", "qué", "bien", "dos", "ahí", "luego", "yo", "él", "tú","???","NNN"] - - if 'PUCP' in path: - list_labels_banned = ["ya", "qué?", "qué", "bien", "dos", "ahí", "luego", "yo", "él", "tú","???","NNN"] - list_labels_banned += ["sí","ella","uno","ese","ah","dijo","llamar"] - - if 'WLASL' in path: - list_labels_banned = ['apple','computer','fish','kiss','later','no','orange','pizza','purple','secretary','shirt','sunday','take','water','yellow'] - - - with h5py.File(path, "r") as f: - for index in f.keys(): - label = f[index]['label'][...].item().decode('utf-8') - - if str(label) in list_labels_banned: - continue - - classes.append(label) - videoName.append(f[index]['video_name'][...].item().decode('utf-8')) - data.append(f[index]["data"][...]) - - print('config : ',config) - points = pd.read_csv(f"points_{config}.csv") - - tar = model_key_getter(points) - print('tart',tar) - - data = [d[:,:,tar] for d in data] - - meaning = {v:k for (k,v) in enumerate(sorted(set(classes)))} - - retrive_meaning = {k:v for (k,v) in enumerate(sorted(set(classes)))} - - labels = [meaning[label] for label in classes] - - print('meaning',meaning) - print('retrive_meaning',retrive_meaning) - - return labels, videoName, data, retrive_meaning - - -def gendata(data_path, out_path, model_key_getter, part='train', config=1): - - data=[] - sample_names = [] - - labels, sample_names, data , retrive_meaning = read_data(data_path, model_key_getter,config) - fp = np.zeros((len(labels), max_frame, config, num_channels, max_body_true), dtype=np.float32) - - for i, skel in enumerate(data): - - skel = np.array(skel) - skel = np.moveaxis(skel,1,2) - skel = skel # *256 - - if skel.shape[0] < max_frame: - L = skel.shape[0] - - 
fp[i,:L,:,:,0] = skel - - rest = max_frame - L - num = int(np.ceil(rest / L)) - pad = np.concatenate([skel for _ in range(num)], 0)[:rest] - fp[i,L:,:,:,0] = pad - - else: - L = skel.shape[0] - - fp[i,:,:,:,0] = skel[:max_frame,:,:] - - - with open('{}/{}_label.pkl'.format(out_path, part), 'wb') as f: - pickle.dump((sample_names, labels), f) - - fp = np.transpose(fp, [0, 3, 1, 2, 4]) - print(fp.shape) - np.save('{}/{}_data_joint.npy'.format(out_path, part), fp) - - with open('{}/meaning.pkl'.format(out_path), 'wb') as f: - pickle.dump(retrive_meaning, f) - - - - -if __name__ == '__main__': - - folderName= '1' # just used to create folder "1" in data/sign/1/ - out_folder='../data/sign/' - out_path = os.path.join(out_folder, folderName) - - kp_model = 'wholepose' # openpose wholepose mediapipe - dataset = "WLASL" # WLASL PUCP_PSL_DGI156 AEC - numPoints = 29 # number of points used, need to be: 29 or 71 - - model_key_getter = {'mediapipe': get_mp_keys, - 'openpose': get_op_keys, - 'wholepose': get_wp_keys} - - if not os.path.exists(out_path): - os.makedirs(out_path) - - - print('\n',kp_model, dataset,'\n') - - part = "train" - print(out_path,'->', part) - data_path = f'../../../../joe/ConnectingPoints/split/{dataset}--{kp_model}-Train.hdf5' - gendata(data_path, out_path, model_key_getter[kp_model], part=part, config=numPoints) - - - part = "val" - print(out_path,'->', part) - data_path = f'../../../ConnectingPoints/split/{dataset}--{kp_model}-Val.hdf5' - - gendata(data_path, out_path, model_key_getter[kp_model], part=part, config=numPoints) diff --git a/data_gen/sign_gendata.py b/data_gen/sign_gendata.py deleted file mode 100644 index f5b4cf1..0000000 --- a/data_gen/sign_gendata.py +++ /dev/null @@ -1,98 +0,0 @@ -import argparse -import pickle -from tqdm import tqdm -import sys -import numpy as np -import os - -sys.path.extend(['../']) - -selected_joints = { - '59': np.concatenate((np.arange(0,17), np.arange(91,133)), axis=0), #59 - '31': np.concatenate((np.arange(0,11), [91,95,96,99,100,103,104,107,108,111],[112,116,117,120,121,124,125,128,129,132]), axis=0), #31 - '27': np.concatenate(([0,5,6,7,8,9,10], - [91,95,96,99,100,103,104,107,108,111],[112,116,117,120,121,124,125,128,129,132]), axis=0) #27 -} - -max_body_true = 1 -max_frame = 150 -num_channels = 3 - - - -def gendata(data_path, label_path, out_path, part='train', config='27'): - labels = [] - data=[] - sample_names = [] - selected = selected_joints[config] - num_joints = len(selected) - label_file = open(label_path, 'r', encoding='utf-8') - - - for line in label_file.readlines(): - line = line.strip() - line = line.split(',') - - sample_names.append(line[0]) - data.append(os.path.join(data_path, line[0] + '_color.mp4.npy')) - # print(line[1]) - labels.append(int(line[1])) - # print(labels[-1]) - - fp = np.zeros((len(data), max_frame, num_joints, num_channels, max_body_true), dtype=np.float32) - - for i, data_path in enumerate(data): - - # print(sample_names[i]) - skel = np.load(data_path) - skel = skel[:,selected,:] - - if skel.shape[0] < max_frame: - L = skel.shape[0] - print(L) - fp[i,:L,:,:,0] = skel - - rest = max_frame - L - num = int(np.ceil(rest / L)) - pad = np.concatenate([skel for _ in range(num)], 0)[:rest] - fp[i,L:,:,:,0] = pad - - else: - L = skel.shape[0] - print(L) - fp[i,:,:,:,0] = skel[:max_frame,:,:] - - - with open('{}/{}_label.pkl'.format(out_path, part), 'wb') as f: - pickle.dump((sample_names, labels), f) - - fp = np.transpose(fp, [0, 3, 1, 2, 4]) - print(fp.shape) - 
np.save('{}/{}_data_joint.npy'.format(out_path, part), fp) - - - - - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Sign Data Converter.') - parser.add_argument('--data_path', default='/data/sign/test_npy/npy') #'train_npy/npy', 'va_npy/npy' - parser.add_argument('--label_path', default='../data/sign/27/train_labels.csv') # 'train_labels.csv', 'val_gt.csv', 'test_labels.csv' - parser.add_argument('--out_folder', default='../data/sign/') - parser.add_argument('--points', default='27') - - part = 'test' # 'train', 'val' - arg = parser.parse_args() - - out_path = os.path.join(arg.out_folder, arg.points) - print(out_path) - if not os.path.exists(out_path): - os.makedirs(out_path) - - gendata( - arg.data_path, - arg.label_path, - out_path, - part=part, - config=arg.points) diff --git a/feeders/__init__.py b/feeders/__init__.py deleted file mode 100644 index 7eb0066..0000000 --- a/feeders/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from . import tools -from . import feeder \ No newline at end of file diff --git a/feeders/feeder.py b/feeders/feeder.py deleted file mode 100644 index bc3f9c6..0000000 --- a/feeders/feeder.py +++ /dev/null @@ -1,249 +0,0 @@ -import numpy as np -import pickle -import torch -from torch.utils.data import Dataset -import sys -import random -sys.path.extend(['../']) -from feeders import tools - -# flip_index for 71 and 29 -flip_index = {71:np.concatenate(([0,2,1,4,3,6,5,8,7,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30],[31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],[51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70]), axis=0), - 51:np.concatenate(([0,2,1,4,3,6,5,8,7,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30],[31,32,33,34,35,36,37,38,39,40],[41,42,43,44,45,46,47,48,49,50]), axis=0), - 29:np.concatenate(([0,2,1,4,3,6,5,8,7],[9,10,11,12,13,14,15,16,17,18],[19,20,21,22,23,24,25,26,27,28]), axis=0)} - -class Feeder(Dataset): - def __init__(self, data_path, label_path, meaning_path, - random_choose=False, random_shift=False, random_move=False, - window_size=-1, normalization=False, debug=False, use_mmap=True, random_mirror=False, random_mirror_p=0.5, is_vector=False): - - """ - :param data_path: - :param label_path: - :param random_choose: If true, randomly choose a portion of the input sequence - :param random_shift: If true, randomly pad zeros at the begining or end of sequence - :param random_move: - :param window_size: The length of the output sequence - :param normalization: If true, normalize input sequence - :param debug: If true, only use the first 100 samples - :param use_mmap: If true, use mmap mode to load data, which can save the running memory - """ - - self.debug = debug - self.data_path = data_path - self.label_path = label_path - self.meaning_path = meaning_path - self.random_choose = random_choose - self.random_shift = random_shift - self.random_move = random_move - self.window_size = window_size - self.normalization = normalization - self.use_mmap = use_mmap - self.random_mirror = random_mirror - self.random_mirror_p = random_mirror_p - self.load_data() - self.is_vector = is_vector - if normalization: - self.get_mean_map() - - def load_data(self): - # data: N C V T M - - try: - with open(self.label_path) as f: - self.sample_name, self.label = pickle.load(f) - except: - # for pickle file from python2 - with open(self.label_path, 'rb') as f: - self.sample_name, self.label = pickle.load(f, encoding='latin1') - - # load data - if self.use_mmap: - self.data = 
np.load(self.data_path, mmap_mode='r') - else: - self.data = np.load(self.data_path) - if self.debug: - self.label = self.label[0:100] - self.data = self.data[0:100] - self.sample_name = self.sample_name[0:100] - try: - with open(self.meaning_path) as f: - self.meaning = pickle.load(f) - except: - # for pickle file from python2 - with open(self.meaning_path, 'rb') as f: - self.meaning = pickle.load(f, encoding='latin1') - - - def get_mean_map(self): - data = self.data - N, C, T, V, M = data.shape - self.mean_map = data.mean(axis=2, keepdims=True).mean(axis=4, keepdims=True).mean(axis=0) - self.std_map = data.transpose((0, 2, 4, 1, 3)).reshape((N * T * M, C * V)).std(axis=0).reshape((C, 1, V, 1)) - - def __len__(self): - return len(self.label) - - def __iter__(self): - return self - - def __getitem__(self, index): - data_numpy = self.data[index] - label = self.label[index] - name = self.sample_name[index] - data_numpy = np.array(data_numpy) - - if self.random_choose: - data_numpy = tools.random_choose(data_numpy, self.window_size) - - if self.random_mirror: - if random.random() > self.random_mirror_p: - #print("dabe before random mirror", data_numpy) - assert data_numpy.shape[2] == 71 or data_numpy.shape[2] == 29 or data_numpy.shape[2] == 51 - data_numpy = data_numpy[:,:,flip_index[data_numpy.shape[2]],:] - if self.is_vector: - data_numpy[0,:,:,:] = - data_numpy[0,:,:,:] - else: - data_numpy[0,:,:,:] = 1 - data_numpy[0,:,:,:] - #print("dabe after random mirror", data_numpy) - - if self.normalization: - # data_numpy = (data_numpy - self.mean_map) / self.std_map - assert data_numpy.shape[0] == 2 - #print("dabe before norm", data_numpy) - if self.is_vector: - data_numpy[0,:,0,:] = data_numpy[0,:,0,:] - data_numpy[0,:,0,0].mean(axis=0) - data_numpy[1,:,0,:] = data_numpy[1,:,0,:] - data_numpy[1,:,0,0].mean(axis=0) - else: - data_numpy[0,:,:,:] = data_numpy[0,:,:,:] - data_numpy[0,:,0,0].mean(axis=0) - data_numpy[1,:,:,:] = data_numpy[1,:,:,:] - data_numpy[1,:,0,0].mean(axis=0) - #print("dabe after norm", data_numpy) - if self.random_shift: - - #print("dabe before shift", data_numpy) - if self.is_vector: - data_numpy[0,:,0,:] += random.random() * 20 - 10.0 - data_numpy[1,:,0,:] += random.random() * 20 - 10.0 - else: - data_numpy[0,:,:,:] += random.random()/25 #random.random() * 20 - 10.0 - data_numpy[1,:,:,:] += random.random()/25 #random.random() * 20 - 10.0 - #print("dabe after shift", data_numpy) - - # if self.random_shift: - # data_numpy = tools.random_shift(data_numpy) - - # elif self.window_size > 0: - # data_numpy = tools.auto_pading(data_numpy, self.window_size) - if self.random_move: - data_numpy = tools.random_move(data_numpy) - - return data_numpy, label, index, name - - def top_k(self, score, top_k): - rank = score.argsort() - hit_top_k = [l in rank[i, -top_k:] for i, l in enumerate(self.label)] - return sum(hit_top_k) * 1.0 / len(hit_top_k) - - -def import_class(name): - components = name.split('.') - mod = __import__(components[0]) - for comp in components[1:]: - mod = getattr(mod, comp) - return mod - - -def test(data_path, label_path, vid=None, graph=None, is_3d=False): - ''' - vis the samples using matplotlib - :param data_path: - :param label_path: - :param vid: the id of sample - :param graph: - :param is_3d: when vis NTU, set it True - :return: - ''' - import matplotlib.pyplot as plt - loader = torch.utils.data.DataLoader( - dataset=Feeder(data_path, label_path), - batch_size=64, - shuffle=False, - num_workers=2) - - if vid is not None: - sample_name = 
loader.dataset.sample_name - sample_id = [name.split('.')[0] for name in sample_name] - index = sample_id.index(vid) - data, label, index = loader.dataset[index] - data = data.reshape((1,) + data.shape) - - # for batch_idx, (data, label) in enumerate(loader): - N, C, T, V, M = data.shape - - plt.ion() - fig = plt.figure() - if is_3d: - from mpl_toolkits.mplot3d import Axes3D - ax = fig.add_subplot(111, projection='3d') - else: - ax = fig.add_subplot(111) - - if graph is None: - p_type = ['b.', 'g.', 'r.', 'c.', 'm.', 'y.', 'k.', 'k.', 'k.', 'k.'] - pose = [ - ax.plot(np.zeros(V), np.zeros(V), p_type[m])[0] for m in range(M) - ] - ax.axis([-1, 1, -1, 1]) - for t in range(T): - for m in range(M): - pose[m].set_xdata(data[0, 0, t, :, m]) - pose[m].set_ydata(data[0, 1, t, :, m]) - fig.canvas.draw() - plt.pause(0.001) - else: - p_type = ['b-', 'g-', 'r-', 'c-', 'm-', 'y-', 'k-', 'k-', 'k-', 'k-'] - import sys - from os import path - sys.path.append( - path.dirname(path.dirname(path.dirname(path.abspath(__file__))))) - G = import_class(graph)() - edge = G.inward - pose = [] - for m in range(M): - a = [] - for i in range(len(edge)): - if is_3d: - a.append(ax.plot(np.zeros(3), np.zeros(3), p_type[m])[0]) - else: - a.append(ax.plot(np.zeros(2), np.zeros(2), p_type[m])[0]) - pose.append(a) - ax.axis([-1, 1, -1, 1]) - if is_3d: - ax.set_zlim3d(-1, 1) - for t in range(T): - for m in range(M): - for i, (v1, v2) in enumerate(edge): - x1 = data[0, :2, t, v1, m] - x2 = data[0, :2, t, v2, m] - if (x1.sum() != 0 and x2.sum() != 0) or v1 == 1 or v2 == 1: - pose[m][i].set_xdata(data[0, 0, t, [v1, v2], m]) - pose[m][i].set_ydata(data[0, 1, t, [v1, v2], m]) - if is_3d: - pose[m][i].set_3d_properties(data[0, 2, t, [v1, v2], m]) - fig.canvas.draw() - # plt.savefig('/home/lshi/Desktop/skeleton_sequence/' + str(t) + '.jpg') - plt.pause(0.01) - - -if __name__ == '__main__': - import os - - os.environ['DISPLAY'] = 'localhost:10.0' - data_path = "../data/ntu/xview/val_data_joint.npy" - label_path = "../data/ntu/xview/val_label.pkl" - graph = 'graph.ntu_rgb_d.Graph' - test(data_path, label_path, vid='S004C001P003R001A032', graph=graph, is_3d=True) - # data_path = "../data/kinetics/val_data.npy" - # label_path = "../data/kinetics/val_label.pkl" - # graph = 'graph.Kinetics' - # test(data_path, label_path, vid='UOD7oll3Kqo', graph=graph) diff --git a/feeders/tools.py b/feeders/tools.py deleted file mode 100644 index f14e9b9..0000000 --- a/feeders/tools.py +++ /dev/null @@ -1,161 +0,0 @@ -import random - -import numpy as np - - -def downsample(data_numpy, step, random_sample=True): - # input: C,T,V,M - begin = np.random.randint(step) if random_sample else 0 - return data_numpy[:, begin::step, :, :] - - -def temporal_slice(data_numpy, step): - # input: C,T,V,M - C, T, V, M = data_numpy.shape - return data_numpy.reshape(C, T / step, step, V, M).transpose( - (0, 1, 3, 2, 4)).reshape(C, T / step, V, step * M) - - -def mean_subtractor(data_numpy, mean): - # input: C,T,V,M - # naive version - if mean == 0: - return - C, T, V, M = data_numpy.shape - valid_frame = (data_numpy != 0).sum(axis=3).sum(axis=2).sum(axis=0) > 0 - begin = valid_frame.argmax() - end = len(valid_frame) - valid_frame[::-1].argmax() - data_numpy[:, :end, :, :] = data_numpy[:, :end, :, :] - mean - return data_numpy - - -def auto_pading(data_numpy, size, random_pad=False): - C, T, V, M = data_numpy.shape - if T < size: - begin = random.randint(0, size - T) if random_pad else 0 - data_numpy_paded = np.zeros((C, size, V, M)) - data_numpy_paded[:, begin:begin + T, 
:, :] = data_numpy
-        return data_numpy_paded
-    else:
-        return data_numpy
-
-
-def random_choose(data_numpy, size, auto_pad=True):
-    # input: C,T,V,M  randomly choose one segment; not very reasonable, because of the zero padding
-    C, T, V, M = data_numpy.shape
-    if T == size:
-        return data_numpy
-    elif T < size:
-        if auto_pad:
-            return auto_pading(data_numpy, size, random_pad=True)
-        else:
-            return data_numpy
-    else:
-        begin = random.randint(0, T - size)
-        return data_numpy[:, begin:begin + size, :, :]
-
-
-def random_move(data_numpy,
-                angle_candidate=[-10., -5., 0., 5., 10.],
-                scale_candidate=[0.9, 1.0, 1.1],
-                transform_candidate=[-0.2, -0.1, 0.0, 0.1, 0.2],
-                move_time_candidate=[1]):
-    # input: C,T,V,M
-    C, T, V, M = data_numpy.shape
-    move_time = random.choice(move_time_candidate)
-    node = np.arange(0, T, T * 1.0 / move_time).round().astype(int)
-    node = np.append(node, T)
-    num_node = len(node)
-
-    A = np.random.choice(angle_candidate, num_node)
-    S = np.random.choice(scale_candidate, num_node)
-    T_x = np.random.choice(transform_candidate, num_node)
-    T_y = np.random.choice(transform_candidate, num_node)
-
-    a = np.zeros(T)
-    s = np.zeros(T)
-    t_x = np.zeros(T)
-    t_y = np.zeros(T)
-
-    # linspace
-    for i in range(num_node - 1):
-        a[node[i]:node[i + 1]] = np.linspace(
-            A[i], A[i + 1], node[i + 1] - node[i]) * np.pi / 180
-        s[node[i]:node[i + 1]] = np.linspace(S[i], S[i + 1],
-                                             node[i + 1] - node[i])
-        t_x[node[i]:node[i + 1]] = np.linspace(T_x[i], T_x[i + 1],
-                                               node[i + 1] - node[i])
-        t_y[node[i]:node[i + 1]] = np.linspace(T_y[i], T_y[i + 1],
-                                               node[i + 1] - node[i])
-
-    theta = np.array([[np.cos(a) * s, -np.sin(a) * s],
-                      [np.sin(a) * s, np.cos(a) * s]])  # rotation matrix
-
-    # perform transformation
-    for i_frame in range(T):
-        xy = data_numpy[0:2, i_frame, :, :]
-        new_xy = np.dot(theta[:, :, i_frame], xy.reshape(2, -1))
-        new_xy[0] += t_x[i_frame]
-        new_xy[1] += t_y[i_frame]  # translation
-        data_numpy[0:2, i_frame, :, :] = new_xy.reshape(2, V, M)
-
-    return data_numpy
-
-
-def random_shift(data_numpy):
-    # input: C,T,V,M  shift one segment
-    C, T, V, M = data_numpy.shape
-    data_shift = np.zeros(data_numpy.shape)
-    valid_frame = (data_numpy != 0).sum(axis=3).sum(axis=2).sum(axis=0) > 0
-    begin = valid_frame.argmax()
-    end = len(valid_frame) - valid_frame[::-1].argmax()
-
-    size = end - begin
-    bias = random.randint(0, T - size)
-    data_shift[:, bias:bias + size, :, :] = data_numpy[:, begin:end, :, :]
-
-    return data_shift
-
-
-def openpose_match(data_numpy):
-    C, T, V, M = data_numpy.shape
-    assert (C == 3)
-    score = data_numpy[2, :, :, :].sum(axis=1)
-    # the rank of body confidence in each frame (shape: T-1, M)
-    rank = (-score[0:T - 1]).argsort(axis=1).reshape(T - 1, M)
-
-    # data of frame 1
-    xy1 = data_numpy[0:2, 0:T - 1, :, :].reshape(2, T - 1, V, M, 1)
-    # data of frame 2
-    xy2 = data_numpy[0:2, 1:T, :, :].reshape(2, T - 1, V, 1, M)
-    # square of distance between frame 1&2 (shape: T-1, M, M)
-    distance = ((xy2 - xy1) ** 2).sum(axis=2).sum(axis=0)
-
-    # match pose
-    forward_map = np.zeros((T, M), dtype=int) - 1
-    forward_map[0] = range(M)
-    for m in range(M):
-        choose = (rank == m)
-        forward = distance[choose].argmin(axis=1)
-        for t in range(T - 1):
-            distance[t, :, forward[t]] = np.inf
-        forward_map[1:][choose] = forward
-    assert (np.all(forward_map >= 0))
-
-    # string data
-    for t in range(T - 1):
-        forward_map[t + 1] = forward_map[t + 1][forward_map[t]]
-
-    # generate data
-    new_data_numpy = np.zeros(data_numpy.shape)
-    for t in range(T):
-        new_data_numpy[:, t, :, :] = data_numpy[:, t, :, forward_map[
-            t]].transpose(1, 2, 0)
-    data_numpy = new_data_numpy
-
-    # score sort
-    trace_score = data_numpy[2, :, :, :].sum(axis=1).sum(axis=0)
-    rank = (-trace_score).argsort()
-    data_numpy = data_numpy[:, :, :, rank]
-
-    return data_numpy
diff --git a/graph/__init__.py b/graph/__init__.py
deleted file mode 100644
index 4a1bf91..0000000
--- a/graph/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from . import tools
-from . import sign_27
\ No newline at end of file
diff --git a/graph/sign_27.py b/graph/sign_27.py
deleted file mode 100644
index 7259295..0000000
--- a/graph/sign_27.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import sys
-
-sys.path.extend(['../'])
-from graph import tools
-import pandas as pd
-
-
-
-'''
-inward_ori_index = [(5, 6), (5, 7),
-                    (6, 8), (8, 10), (7, 9), (9, 11),
-                    (12,13),(12,14),(12,16),(12,18),(12,20),
-                    (14,15),(16,17),(18,19),(20,21),
-                    (22,23),(22,24),(22,26),(22,28),(22,30),
-                    (24,25),(26,27),(28,29),(30,31),
-                    (10,12),(11,22)]
-
-
-inward_ori_index = [(1, 2), (1, 3), (2, 4), (4, 6), (3, 5), (5, 7),
-
-                    (6, 8),
-                    (8, 9), (9, 10), (10, 11), (11, 12),
-                    (8, 13), (13, 14), (14, 15), (15, 16),
-                    (8, 17), (17, 18), (18, 19), (19, 20),
-                    (8, 21), (21, 22), (22, 23), (23, 24),
-                    (8, 25), (25, 26), (26, 27), (27, 28),
-
-                    (7, 29),
-                    (29, 30), (30, 31), (31, 32), (32, 33),
-                    (29, 34), (34, 35), (35, 36), (36, 37),
-                    (29, 38), (38, 39), (39, 40), (40, 41),
-                    (29, 42), (42, 43), (43, 44), (44, 45),
-                    (29, 46), (46, 47), (47, 48), (48, 49)
-                    ]
-'''
-
-
-
-class Graph:
-    def __init__(self, labeling_mode='spatial',num_node=29):
-        self.num_node = num_node
-        #num_node = 29 # 29 or 71
-        points = pd.read_csv(f"points_{self.num_node}.csv")
-        ori = points.origin
-        tar = points.tarjet
-
-        self.inward_ori_index = [(o,t) for o, t in zip(ori, tar)]
-
-
-        self.self_link = [(i, i) for i in range(self.num_node)]
-
-        self.inward = [(i - 1, j - 1) for (i, j) in self.inward_ori_index]
-        self.outward = [(j, i) for (i, j) in self.inward]
-        self.neighbor = self.inward + self.outward
-
-        print("NUM OF NODES:", self.num_node)
-
-
-        self.A = self.get_adjacency_matrix(labeling_mode)
-
-    def get_adjacency_matrix(self, labeling_mode=None):
-        if labeling_mode is None:
-            return self.A
-        if labeling_mode == 'spatial':
-            A = tools.get_spatial_graph(self.num_node, self.self_link, self.inward, self.outward)
-        else:
-            raise ValueError()
-        return A
-
-
-if __name__ == '__main__':
-    import matplotlib.pyplot as plt
-    import os
-
-    # os.environ['DISPLAY'] = 'localhost:11.0'
-    A = Graph('spatial').get_adjacency_matrix()
-    for i in A:
-        plt.imshow(i, cmap='gray')
-        plt.show()
-    print(A)
diff --git a/graph/tools.py b/graph/tools.py
deleted file mode 100644
index 854e30a..0000000
--- a/graph/tools.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import numpy as np
-
-
-def edge2mat(link, num_node):
-    A = np.zeros((num_node, num_node))
-    for i, j in link:
-        A[j, i] = 1
-    return A
-
-
-def normalize_digraph(A):  # divide by the sum of each column
-    Dl = np.sum(A, 0)
-    h, w = A.shape
-    Dn = np.zeros((w, w))
-    for i in range(w):
-        if Dl[i] > 0:
-            Dn[i, i] = Dl[i] ** (-1)
-    AD = np.dot(A, Dn)
-    return AD
-
-
-def get_spatial_graph(num_node, self_link, inward, outward):
-    I = edge2mat(self_link, num_node)
-    In = normalize_digraph(edge2mat(inward, num_node))
-    Out = normalize_digraph(edge2mat(outward, num_node))
-    A = np.stack((I, In, Out))
-    return A
diff --git a/main.py b/main.py
deleted file mode 100644
index 7c97b4c..0000000
--- a/main.py
+++ /dev/null
@@ -1,1053 +0,0 @@
-#!/usr/bin/env python
-from __future__ import print_function
-import argparse
-import os
-import time
-import numpy as np
-import yaml
-import
pickle -from collections import OrderedDict -import csv -# torch -import torch -import torch.nn as nn -import torch.optim as optim -from torch.autograd import Variable -from tqdm import tqdm -import shutil -from torch.optim.lr_scheduler import ReduceLROnPlateau -import random -import inspect -import torchmetrics -import matplotlib.pyplot as plt -import seaborn as sns -import pandas as pd -import wandbFunctions as wandbF -import wandb -import time -from data_gen.getConnectingPoint import * - -wandbFlag = True - -# class LabelSmoothingCrossEntropy(nn.Module): -# def __init__(self): -# super(LabelSmoothingCrossEntropy, self).__init__() -# def forward(self, x, target, smoothing=0.1): -# confidence = 1. - smoothing -# logprobs = F.log_softmax(x, dim=-1) -# nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1)) -# nll_loss = nll_loss.squeeze(1) -# smooth_loss = -logprobs.mean(dim=-1) -# loss = confidence * nll_loss + smoothing * smooth_loss -# return loss.mean() - - -model_name = '' -def create_one_folder(directory): - if not os.path.exists(directory): - os.makedirs(directory) - -def create_folder(directory): - path = directory.split('/') - total_path ='' - for i in path: - total_path = os.path.join(total_path,i) - #print(i, ' create : ',total_path) - create_one_folder(total_path) - - #print('directory : ',directory) - create_one_folder(directory) - create_one_folder(directory+'/') - #print('created paths') - -def init_seed(value_seed): - torch.cuda.manual_seed_all(value_seed) - torch.manual_seed(value_seed) - np.random.seed(value_seed) - random.seed(value_seed) - #torch.backends.cudnn.enabled = False - torch.backends.cudnn.deterministic = True - torch.backends.cudnn.benchmark = False - - -def get_parser(): - # parameter priority: command line > config > default - parser = argparse.ArgumentParser(description='Decoupling Graph Convolution Network with DropGraph Module') - parser.add_argument('--work-dir',default='./work_dir/temp',help='the work folder for storing results') - - parser.add_argument('-model_saved_directory', default='') - parser.add_argument('-experiment_name', default='') - parser.add_argument('--config',default='config/sign/train/train_joint.yaml',help='path to the configuration file') - - # processor - parser.add_argument('--phase', default='train', help='must be train or test') - parser.add_argument('--save-score',type=str2bool,default=False,help='if ture, the classification score will be stored') - - # visulize and debug - parser.add_argument('--seed', type=int, default=1, help='random seed for pytorch') - parser.add_argument('--log-interval',type=int,default=100,help='the interval for printing messages (#iteration)') - parser.add_argument('--save-interval',type=int,default=2,help='the interval for storing models (#iteration)') - parser.add_argument('--eval-interval',type=int,default=5,help='the interval for evaluating models (#iteration)') - parser.add_argument('--print-log',type=str2bool,default=True,help='print logging or not') - parser.add_argument('--show-topk',type=int,default=[1, 5],nargs='+',help='which Top K accuracy will be shown') - - # feeder - parser.add_argument('--feeder', default='feeder.feeder', help='data loader will be used') - parser.add_argument('--num-worker',type=int,default=32,help='the number of worker for data loader') - parser.add_argument('--train-feeder-args',default=dict(),help='the arguments of data loader for training') - parser.add_argument('--test-feeder-args',default=dict(),help='the arguments of data loader for test') - - # model - 
parser.add_argument('--model', default=None, help='the model will be used') - parser.add_argument('--model-args',type=dict,default=dict(),help='the arguments of model') - parser.add_argument('--weights',default=None,help='the weights for network initialization') - parser.add_argument('--ignore-weights',type=str,default=[],nargs='+',help='the name of weights which will be ignored in the initialization') - - # optim - parser.add_argument('--base_lr', type=float, default=0.05, help='initial learning rate') - parser.add_argument('--num_epoch',type=int,default=500,help='stop training in which epoch') - - parser.add_argument('--step',type=int,default=[20, 40, 60],nargs='+',help='the epoch where optimizer reduce the learning rate') - parser.add_argument('--device',type=int,default=0,nargs='+',help='the indexes of GPUs for training or testing') - parser.add_argument('--optimizer', default='SGD', help='type of optimizer') - parser.add_argument('--nesterov', type=str2bool, default=False, help='use nesterov or not') - parser.add_argument('--batch-size', type=int, default=32, help='training batch size') - parser.add_argument('--test-batch-size', type=int, default=256, help='test batch size') - parser.add_argument('--start-epoch',type=int,default=0,help='start training from which epoch') - parser.add_argument('--weight-decay',type=float,default=0.0001,help='weight decay for optimizer') - parser.add_argument('--keep_rate',type=float,default=0.9,help='keep probability for drop') - parser.add_argument('--groups',type=int,default=8,help='decouple groups') - parser.add_argument('--only_train_part', default=True) - parser.add_argument('--only_train_epoch', default=0) - parser.add_argument('--warm_up_epoch', default=0) - - # Data - - parser.add_argument("--experiment_name", type=str, default="", help="Path to the training dataset CSV file") - parser.add_argument("--training_set_path", type=str, default="", help="Path to the training dataset CSV file") - parser.add_argument("--keypoints_model", type=str, default="openpose", help="Path to the training dataset CSV file") - parser.add_argument("--keypoints_number", type=int, default=29, help="Path to the training dataset CSV file") - parser.add_argument("--testing_set_path", type=str, default="", help="Path to the testing dataset CSV file") - parser.add_argument("--num_class", type=int, default=0, help="Path to the testing dataset CSV file") - parser.add_argument("--database", type=str, default="", help="Path to the testing dataset CSV file") - parser.add_argument("--mode_train", type=str, default="train", help="Path to the testing dataset CSV file") - - return parser - - -class Processor(): - """ - Processor for Skeleton-based Action Recgnition - """ - - def __init__(self, arg): - - - self.arg = arg - self.save_arg() - self.connectingPoints(arg) - - - if arg.phase == 'train': - pass - ''' - if not arg.train_feeder_args['debug']: - if os.path.isdir(arg.model_saved_directory): - print('log_dir: ', arg.model_saved_directory, 'already exist') - answer = 'y'#input('delete it? 
y/n:') - if answer == 'y': - shutil.rmtree(arg.model_saved_directory) - print('Dir removed: ', arg.model_saved_directory) - #input('Refresh the website of tensorboard by pressing any keys') - else: - print('Dir not removed: ', arg.model_saved_directory) - ''' - - self.global_step = 0 - self.load_model() - self.load_optimizer() - self.load_data() - self.lr = self.arg.base_lr - self.best_acc = 0 - self.best_tmp_acc = 0 - - self.maxTestAcc = 0 - self.relative_maxtop5 = 0 - - - def connectingPoints(self,arg): - print('Creating points .. ') - - folderName= '1' # just used to create folder "1" in data/sign/1/ - out_folder='data/sign/' - out_path = os.path.join(out_folder, folderName) - - kp_model = arg.kp_model# 'wholepose' # openpose wholepose mediapipe - dataset = arg.experiment_name# "PUCP" # WLASL PUCP_PSL_DGI156 AEC - numPoints = arg.keypoints_number # number of points used, need to be: 29 or 71 - data_path_train = arg.training_set_path #f'../../../../joe/ConnectingPoints/split/WLASL--wholepose-Train.hdf5' - data_path_test = arg.testing_set_path#f'../../../../joe/ConnectingPoints/split/WLASL--wholepose-Val.hdf5' - - - model_key_getter = {'mediapipe': get_mp_keys, - 'openpose': get_op_keys, - 'wholepose': get_wp_keys} - - if not os.path.exists(out_path): - os.makedirs(out_path) - - print('kp_model',kp_model) - print('\n',kp_model, dataset,'\n') - print(out_path,'->', 'train') - gendata(data_path_train, out_path, model_key_getter[kp_model], part='train', config=numPoints) - print(out_path,'->', 'val') - gendata(data_path_test, out_path, model_key_getter[kp_model], part='val', config=numPoints) - print('Creating points completed!!! ') - - - def load_data(self): - Feeder = import_class(self.arg.feeder) - ln = Feeder(**self.arg.test_feeder_args) - self.meaning = ln.meaning - #print(ln.meaning) - self.data_loader = dict() - if self.arg.phase == 'train': - self.data_loader['train'] = torch.utils.data.DataLoader( - dataset=Feeder(**self.arg.train_feeder_args), - batch_size=self.arg.batch_size, - shuffle=True, - num_workers=self.arg.num_worker, - drop_last=True, - worker_init_fn=init_seed) - self.data_loader['test'] = torch.utils.data.DataLoader( - dataset=Feeder(**self.arg.test_feeder_args), - batch_size=self.arg.test_batch_size, - shuffle=False, - num_workers=self.arg.num_worker, - drop_last=False, - worker_init_fn=init_seed) - - def load_model(self): - output_device = self.arg.device[0] if type( - self.arg.device) is list else self.arg.device - self.output_device = output_device - - print('^'*20) - print('self.arg.model',self.arg.model) - print('model_args',self.arg.model_args) - - Model = import_class(self.arg.model) - - - shutil.copy2(inspect.getfile(Model), self.arg.work_dir) - self.model = Model(**self.arg.model_args).cuda(output_device) - # print(self.model) - if wandbFlag: - wandbF.watch(self.model) - self.loss = nn.CrossEntropyLoss().cuda(output_device) - - path_model_init = os.path.join(arg.model_saved_directory,arg.kp_model + '-' + arg.database +'-'+str(arg.keypoints_number)+ "-"+str(arg.seed)+"-init.pt") - - self.print_log('%'*20) - self.print_log('path_model_init :') - self.print_log(path_model_init) - torch.save(self.model.state_dict(), path_model_init) - self.print_log('%'*20) - - self.m_params = sum(p.numel() for p in self.model.parameters()) - self.trainable_m_params= sum(p.numel() for p in self.model.parameters() if p.requires_grad) - - - - # self.loss = LabelSmoothingCrossEntropy().cuda(output_device) - - - #self.slrt_model_wp.load_state_dict(self.slrt_model_op.state_dict()) - - 
if self.arg.weights: - self.print_log('Load weights from {}.'.format(self.arg.weights)) - if '.pkl' in self.arg.weights: - with open(self.arg.weights, 'r') as f: - weights = pickle.load(f) - else: - weights = torch.load(self.arg.weights) - self.print_log("weights readed!") - - weights = OrderedDict( - [[k.split('module.')[-1], - v.cuda(output_device)] for k, v in weights.items()]) - - for w in self.arg.ignore_weights: - if weights.pop(w, None) is not None: - self.print_log('Sucessfully Remove Weights: {}.'.format(w)) - else: - self.print_log('Can Not Remove Weights: {}.'.format(w)) - - try: - self.print_log("load state dict weights") - self.model.load_state_dict(weights) - self.print_log("load state dict weights completed!") - - except: - state = self.model.state_dict() - diff = list(set(state.keys()).difference(set(weights.keys()))) - print('Can not find these weights:') - for d in diff: - print(' ' + d) - state.update(weights) - self.model.load_state_dict(state) - - if type(self.arg.device) is list: - if len(self.arg.device) > 1: - self.model = nn.DataParallel( - self.model, - device_ids=self.arg.device, - output_device=output_device) - - def load_optimizer(self): - - if self.arg.optimizer == 'SGD': - - params_dict = dict(self.model.named_parameters()) - params = [] - - for key, value in params_dict.items(): - decay_mult = 0.0 if 'bias' in key else 1.0 - - lr_mult = 1.0 - weight_decay = 1e-4 - - params += [{'params': value, 'lr': self.arg.base_lr, 'lr_mult': lr_mult, - 'decay_mult': decay_mult, 'weight_decay': weight_decay}] - if wandbFlag: - wandb.config = { - "learning_rate": self.arg.base_lr, - "epochs": self.arg.num_epoch, - "batch_size": self.arg.batch_size, - "weight_decay":self.arg.weight_decay, - "num_class":self.arg.model_args["num_class"], - "momentum":0.9 - } - self.optimizer = optim.SGD( - params, - momentum=0.9, - nesterov=self.arg.nesterov) - elif self.arg.optimizer == 'Adam': - self.optimizer = optim.Adam( - self.model.parameters(), - lr=self.arg.base_lr, - weight_decay=self.arg.weight_decay) - - if wandbFlag: - wandb.config = { - "learning_rate": self.arg.base_lr, - "epochs": self.arg.num_epoch, - "batch_size": self.arg.batch_size, - "weight_decay":self.arg.weight_decay, - "num_class":self.arg.model_args["num_class"] - } - else: - raise ValueError() - - self.lr_scheduler = ReduceLROnPlateau(self.optimizer, mode='min', factor=0.1, - patience=10, verbose=True, - threshold=1e-4, threshold_mode='rel', - cooldown=0) - - - def save_arg(self): - # save arg - arg_dict = vars(self.arg) - - if not os.path.exists(self.arg.work_dir): - os.makedirs(self.arg.work_dir) - os.makedirs(self.arg.work_dir + '/eval_results') - os.makedirs(self.arg.work_dir + '/eval_results/'+ model_name, exist_ok = True) - - with open('{}/config.yaml'.format(self.arg.work_dir), 'w') as f: - yaml.dump(arg_dict, f) - - - def adjust_learning_rate(self, epoch): - if self.arg.optimizer == 'SGD' or self.arg.optimizer == 'Adam': - if epoch < self.arg.warm_up_epoch: - lr = self.arg.base_lr * (epoch + 1) / self.arg.warm_up_epoch - else: - lr = self.arg.base_lr * ( - 0.1 ** np.sum(epoch >= np.array(self.arg.step))) - for param_group in self.optimizer.param_groups: - param_group['lr'] = lr - return lr - else: - raise ValueError() - - - def print_time(self): - localtime = time.asctime(time.localtime(time.time())) - self.print_log("Local current time : " + localtime) - - - def print_log(self, str, print_time=True): - if print_time: - localtime = time.asctime(time.localtime(time.time())) - str = "[ " + localtime + ' ] ' + 
str - print(str) - if self.arg.print_log: - with open('{}/log.txt'.format(self.arg.work_dir), 'a') as f: - print(str, file=f) - - - def record_time(self): - self.cur_time = time.time() - return self.cur_time - - - def split_time(self): - split_time = time.time() - self.cur_time - self.record_time() - return split_time - - def train_zero(self, epoch, save_model=False): - self.model.train(False) - loader = self.data_loader['train'] - loss_value = [] - predict_arr = [] - proba_arr = [] - target_arr = [] - - self.record_time() - - timer = dict(dataloader=0.001, model=0.001, statistics=0.001) - process = tqdm(loader) - meaning = list(self.meaning.values()) - - for batch_idx, (data, label, index, name) in enumerate(process): - - self.global_step += 1 - - label_tmp = label.cpu().numpy() - # get data - data = Variable(data.float().cuda(self.output_device), requires_grad=False) - label = Variable(label.long().cuda(self.output_device), requires_grad=False) - timer['dataloader'] += self.split_time() - - # forward - if epoch < 100: - keep_prob = -(1 - self.arg.keep_rate) / 100 * epoch + 1.0 - else: - keep_prob = self.arg.keep_rate - - output = self.model(data, keep_prob) - - if isinstance(output, tuple): - output, l1 = output - l1 = l1.mean() - else: - l1 = 0 - - #print('output',output) - #print('label',label) - loss = self.loss(output, label) - #print('loss',loss) - #for r,s in zip(name,label_tmp): - # meaning[s]= '_'.join(r.split('_')[:-1]) - - loss_value.append(loss.data.cpu().numpy()) - timer['model'] += self.split_time() - - value, predict_label = torch.max(output.data, 1) - - predict_arr.append(predict_label.cpu().numpy()) - target_arr.append(label.data.cpu().numpy()) - proba_arr.append(output.data.cpu().numpy()) - - acc = torch.mean((predict_label == label.data).float()) - - - if self.global_step % self.arg.log_interval == 0: - self.print_log( - '\tBatch({}/{}) done. 
Loss: {:.4f} lr:{:.6f}'.format( - batch_idx, len(loader), loss.data, self.lr)) - timer['statistics'] += self.split_time() - - predict_arr = np.concatenate(predict_arr) - target_arr = np.concatenate(target_arr) - proba_arr = np.concatenate(proba_arr) - accuracy = torch.mean((predict_label == label.data).float()) - if accuracy >= self.best_tmp_acc: - self.best_tmp_acc = accuracy - - if epoch+1 == arg.num_epoch: - if wandbFlag: - wandb.log({"TRAIN_conf_mat" : wandb.plot.confusion_matrix( - #probs=score, - #y_true=list(label.values()), - #preds=list(predict_label.values()), - y_true=list(target_arr), - preds=list(predict_arr), - class_names=meaning, - title="TRAIN_conf_mat")}) - - if wandbFlag: - mean_loss = np.mean(loss_value) - if mean_loss>10: - mean_loss = 10 - wandbF.wandbTrainLog(mean_loss, accuracy,self.m_params,self.trainable_m_params) - # statistics of time consumption and loss - proportion = { - k: '{:02d}%'.format(int(round(v * 100 / sum(timer.values())))) - for k, v in timer.items() - } - - - def train(self, epoch, save_model=False): - self.model.train() - self.print_log('Training epoch: {}'.format(epoch + 1)) - loader = self.data_loader['train'] - self.adjust_learning_rate(epoch) - loss_value = [] - predict_arr = [] - proba_arr = [] - target_arr = [] - - self.record_time() - - timer = dict(dataloader=0.001, model=0.001, statistics=0.001) - process = tqdm(loader) - if epoch >= self.arg.only_train_epoch: - print('only train part, require grad') - for key, value in self.model.named_parameters(): - if 'DecoupleA' in key: - value.requires_grad = True - print(key + '-require grad') - else: - print('only train part, do not require grad') - for key, value in self.model.named_parameters(): - if 'DecoupleA' in key: - value.requires_grad = False - print(key + '-not require grad') - - meaning = list(self.meaning.values()) - - for batch_idx, (data, label, index, name) in enumerate(process): - - self.global_step += 1 - - label_tmp = label.cpu().numpy() - # get data - data = Variable(data.float().cuda( - self.output_device), requires_grad=False) - label = Variable(label.long().cuda( - self.output_device), requires_grad=False) - timer['dataloader'] += self.split_time() - - # forward - if epoch < 100: - keep_prob = -(1 - self.arg.keep_rate) / 100 * epoch + 1.0 - else: - keep_prob = self.arg.keep_rate - - output = self.model(data, keep_prob) - - if isinstance(output, tuple): - output, l1 = output - l1 = l1.mean() - else: - l1 = 0 - loss = self.loss(output, label) + l1 - - #for r,s in zip(name,label_tmp): - # meaning[s]= '_'.join(r.split('_')[:-1]) - - self.optimizer.zero_grad() - loss.backward() - self.optimizer.step() - loss_value.append(loss.data.cpu().numpy()) - timer['model'] += self.split_time() - - value, predict_label = torch.max(output.data, 1) - - predict_arr.append(predict_label.cpu().numpy()) - target_arr.append(label.data.cpu().numpy()) - proba_arr.append(output.data.cpu().numpy()) - - acc = torch.mean((predict_label == label.data).float()) - - self.lr = self.optimizer.param_groups[0]['lr'] - - if self.global_step % self.arg.log_interval == 0: - self.print_log( - '\tBatch({}/{}) done. 
Loss: {:.4f} lr:{:.6f}'.format( - batch_idx, len(loader), loss.data, self.lr)) - timer['statistics'] += self.split_time() - - predict_arr = np.concatenate(predict_arr) - target_arr = np.concatenate(target_arr) - proba_arr = np.concatenate(proba_arr) - accuracy = torch.mean((predict_label == label.data).float()) - if accuracy >= self.best_tmp_acc: - self.best_tmp_acc = accuracy - - if epoch+1 == arg.num_epoch: - if wandbFlag: - wandb.log({"TRAIN_conf_mat" : wandb.plot.confusion_matrix( - #probs=score, - #y_true=list(label.values()), - #preds=list(predict_label.values()), - y_true=list(target_arr), - preds=list(predict_arr), - class_names=meaning, - title="TRAIN_conf_mat")}) - - if wandbFlag: - mean_loss = np.mean(loss_value) - if mean_loss>10: - mean_loss = 10 - - wandbF.wandbTrainLog(mean_loss, accuracy,self.m_params,self.trainable_m_params) - # statistics of time consumption and loss - proportion = { - k: '{:02d}%'.format(int(round(v * 100 / sum(timer.values())))) - for k, v in timer.items() - } - - - def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, result_file=None, isTest=False): - if wrong_file is not None: - f_w = open(wrong_file, 'w') - if result_file is not None: - f_r = open(result_file, 'w') - #if isTest: - submission = dict() - trueLabels = dict() - - meaning = list(self.meaning.values()) - self.model.eval() - with torch.no_grad(): - self.print_log('Eval epoch: {}'.format(epoch + 1)) - for ln in loader_name: - - loss_value = [] - score_frag = [] - right_num_total = 0 - total_num = 0 - loss_total = 0 - step = 0 - process = tqdm(self.data_loader[ln]) - - for batch_idx, (data, label, index, name) in enumerate(process): - label_tmp = label - data = Variable( - data.float().cuda(self.output_device), - requires_grad=False) - label = Variable( - label.long().cuda(self.output_device), - requires_grad=False) - - with torch.no_grad(): - output = self.model(data) - - if isinstance(output, tuple): - output, l1 = output - l1 = l1.mean() - else: - l1 = 0 - - #print('val output',output) - #print('val label',label) - loss = self.loss(output, label) - #print('val loss',loss) - score_frag.append(output.data.cpu().numpy()) - loss_value.append(loss.data.cpu().numpy()) - - _, predict_label = torch.max(output.data, 1) - - #if isTest: - for j in range(output.size(0)): - submission[name[j]] = predict_label[j].item() - trueLabels[name[j]] = label_tmp[j].item() - - step += 1 - - if wrong_file is not None or result_file is not None: - predict = list(predict_label.cpu().numpy()) - true = list(label.data.cpu().numpy()) - for i, x in enumerate(predict): - if result_file is not None: - f_r.write(str(x) + ',' + str(true[i]) + '\n') - if x != true[i] and wrong_file is not None: - f_w.write(str(index[i]) + ',' + - str(x) + ',' + str(true[i]) + '\n') - score = np.concatenate(score_frag) - - if 'UCLA' in arg.experiment_name: - self.data_loader[ln].dataset.sample_name = np.arange( - len(score)) - - accuracy = self.data_loader[ln].dataset.top_k(score, 1) - top5 = self.data_loader[ln].dataset.top_k(score, 5) - - if accuracy > self.best_acc: - self.best_acc = accuracy - - score_dict = dict( - zip(self.data_loader[ln].dataset.sample_name, score)) - - conf_mat = torchmetrics.ConfusionMatrix(num_classes=self.arg.model_args["num_class"]) - ''' - print('self.arg.model_args["num_class"]',self.arg.model_args["num_class"]) - - print('list(submission.values())',list(submission.values())) - print('set(list(submission.values()))',set(list(submission.values()))) - 
print('len(set(list(submission.values())))',len(set(list(submission.values())))) - - print('list(trueLabels.values())',list(trueLabels.values())) - print('set(list(trueLabels.values()))',set(list(trueLabels.values()))) - print('len(set(list(trueLabels.values())))',len(set(list(trueLabels.values())))) - ''' - confusion_matrix = conf_mat(torch.tensor(list(submission.values())).cpu(), torch.tensor(list(trueLabels.values())).cpu()) - confusion_matrix = confusion_matrix.detach().cpu().numpy() - - plt.figure(figsize = (10,7)) - - group_counts = ["{0:0.0f}".format(value) for value in confusion_matrix.flatten()] - ''' - print('confusion_matrix') - print(confusion_matrix) - print('len confusion_matrix') - - print(len(confusion_matrix)) - for line in confusion_matrix: - print('line',line) - print(len(line)) - ''' - confusion_matrix = np.asarray([line/(np.sum(line)+0.0001) for line in confusion_matrix]) - confusion_matrix = np.nan_to_num(confusion_matrix) - - df_cm = pd.DataFrame(confusion_matrix * 100, index = meaning, columns=meaning) - #size_arr = df_cm.sum(axis = 1) - #maxi = max(size_arr) - - group_percentages = ["{0:.1%}".format(value) for value in confusion_matrix.flatten()] - - annot = ["{1}".format(v2, v1) for v1, v2 in zip(group_counts, group_percentages)] - annot = np.asarray(annot).reshape(self.arg.model_args["num_class"], self.arg.model_args["num_class"]) - fig_ = sns.heatmap(df_cm, vmax=100, vmin=0, annot=annot, annot_kws={"size": 5}, cbar_kws={'format': '%.0f%%', 'ticks':[0, 25, 50, 75, 100]},fmt='', cmap='Blues').get_figure() - plt.ylabel('True label') - plt.xlabel('Predicted label' ) - - plt.close(fig_) - - if wandbFlag: - wandb.log({"Confusion matrix": wandb.Image(fig_, caption="VAL_conf_mat")}) - - - print('*'*20) - print('*'*20) - print('*'*20) - - print('./work_dir/' + arg.experiment_name + '/eval_results/'+ model_name+ '/best_acc' + '.pkl') - - - with open('./work_dir/' + arg.experiment_name + '/eval_results/'+ model_name+ '/best_acc' + '.pkl'.format( - epoch, accuracy), 'wb') as f: - pickle.dump(score_dict, f) - - # Save the model - state_dict = self.model.state_dict() - weights = OrderedDict([[k.split('module.')[-1], - v.cpu()] for k, v in state_dict.items()]) - - print('*'*20) - print('*'*20) - print('*'*20) - print(self.arg.model_saved_directory) - print(self.arg.model_saved_directory + '-' + arg.kp_model + '-' + arg.database + "-Lr" + str(arg.base_lr) + "-NClasses" + str(arg.model_args["num_class"]) + '-' + str(accuracy) + '.pt') - torch.save(weights, self.arg.model_saved_directory + '-' + arg.kp_model + '-' + arg.database + "-Lr" + str(arg.base_lr) + "-NClasses" + str(arg.model_args["num_class"]) + '-' + str(accuracy) + '.pt') - - - if epoch + 1 == arg.num_epoch: - - if wandbFlag: - try: - wandb.log({"roc" : wandb.plot.roc_curve( list(trueLabels.values()), score, \ - labels=meaning, classes_to_plot=None)}) - - wandb.log({"pr" : wandb.plot.pr_curve(list(trueLabels.values()), score, - labels=meaning, classes_to_plot=None)}) - except: - pass - #wandb.log({"val_sklearn_conf_mat": wandb.sklearn.plot_confusion_matrix(, - # , meaning_3)}) - ''' - wandb.log({"VAL_conf_mat" : wandb.plot.confusion_matrix( - #probs=score, - y_true=list(trueLabels.values()), - preds=list(submission.values()), - class_names=meaning_3, - title="VAL_conf_mat")}) - ''' - - print('Eval Accuracy: ', accuracy, - ' model: ', self.arg.model_saved_directory) - if wandbFlag: - mean_loss = np.mean(loss_value) - if mean_loss>10: - mean_loss = 10 - - self.maxTestAcc = max(accuracy,self.maxTestAcc) - - if 
self.maxTestAcc == accuracy: - - self.relative_maxtop5 = top5 - - wandbF.wandbValLog(mean_loss, accuracy, top5,self.maxTestAcc,self.relative_maxtop5) - - score_dict = dict( - zip(self.data_loader[ln].dataset.sample_name, score)) - self.print_log('\tMean {} loss of {} batches: {}.'.format( - ln, len(self.data_loader[ln]), np.mean(loss_value))) - for k in self.arg.show_topk: - self.print_log('\tTop{}: {:.2f}%'.format( - k, 100 * self.data_loader[ln].dataset.top_k(score, k))) - ''' - with open('./work_dir/' + arg.experiment_name + '/eval_results/epoch_' + str(epoch) + '_' + str(accuracy) + '.pkl'.format( - epoch, accuracy), 'wb') as f: - pickle.dump(score_dict, f) - ''' - - - predLabels = [] - groundLabels = [] - print("END") - if isTest: - #print(submission) - #print(trueLabels) - totalRows = 0 - with open("submission.csv", 'w') as of: - writer = csv.writer(of) - accum = 0 - for trueName, truePred in trueLabels.items(): - - sample = trueName - #print(f'Predicting {sample}', end=' ') - #print(f'as {submission[sample]} - pred {submission[sample]} and real {row[1]}') - match=0 - predLabels.append(submission[sample]) - groundLabels.append(int(truePred)) - if int(truePred) == int(submission[sample]): - match=1 - accum+=1 - totalRows+=1 - - # identifying subject - with open("pucpSubject.csv") as subjectFile: - readerSubject = csv.reader(subjectFile) - idx = int(sample.split('_')[-1]) - subjectName = 'NA' - for name, idxStart, idxEnd in readerSubject: - if (int(idxStart) <= idx) and (idx<= int(idxEnd)): - subjectName = name - break - writer.writerow([sample, submission[sample], str(truePred), str(match), subjectName]) - - return np.mean(loss_value) - - - def start(self): - if self.arg.phase == 'train': - self.print_log('Parameters:\n{}\n'.format(str(vars(self.arg)))) - self.global_step = self.arg.start_epoch * \ - len(self.data_loader['train']) / self.arg.batch_size - - self.model.train(False) - self.train_zero(0, save_model=False) - val_loss = self.eval(0,save_score=self.arg.save_score,loader_name=['test']) - self.model.train(True) - - for epoch in range(self.arg.start_epoch, self.arg.num_epoch): - - save_model = ((epoch + 1) % self.arg.save_interval == 0) or ( - epoch + 1 == self.arg.num_epoch) - - self.train(epoch, save_model=save_model) - - val_loss = self.eval( - epoch, - save_score=self.arg.save_score, - loader_name=['test']) - - # self.lr_scheduler.step(val_loss) - - print('best accuracy: ', self.best_acc, - ' model_name: ', self.arg.model_saved_directory) - - elif self.arg.phase == 'test': - if not self.arg.test_feeder_args['debug']: - wf = self.arg.model_saved_directory + '_wrong.txt' - rf = self.arg.model_saved_directory + '_right.txt' - else: - wf = rf = None - if self.arg.weights is None: - raise ValueError('Please appoint --weights.') - self.arg.print_log = False - self.print_log('Model: {}.'.format(self.arg.model)) - self.print_log('Weights: {}.'.format(self.arg.weights)) - self.eval(epoch=self.arg.start_epoch, save_score=self.arg.save_score, - loader_name=['test'], wrong_file=wf, result_file=rf, isTest=True) - self.print_log('Done.\n') - - -def str2bool(v): - if v.lower() in ('yes', 'true', 't', 'y', '1'): - return True - elif v.lower() in ('no', 'false', 'f', 'n', '0'): - return False - else: - raise argparse.ArgumentTypeError('Boolean value expected.') - - -def import_class(name): - components = name.split('.') - mod = __import__(components[0]) # import return model - for comp in components[1:]: - mod = getattr(mod, comp) - return mod - - -if __name__ == '__main__': - - - parser = 
get_parser() - arg = parser.parse_args() - print('seed :',arg.seed) - init_seed(arg.seed) - - for id_iteration in range(1): - - # load arg form config file - - - - print('arg.config',arg.config) - if arg.config is not None: - with open(arg.config, 'r') as f: - #default_arg = yaml.load(f) - default_arg = yaml.safe_load(f) - print('default_arg',default_arg) - key = vars(arg).keys() - for k in default_arg.keys(): - if k not in key: - print('WRONG ARG: {}'.format(k)) - assert (k in key) - parser.set_defaults(**default_arg) - - # load arg form config file - arg = parser.parse_args() - - arg.training_set_path = '../../DATASETS/'+arg.database+'--'+arg.keypoints_model+'-Train.hdf5' - arg.testing_set_path = '../../DATASETS/'+arg.database+'--'+arg.keypoints_model+'-Val.hdf5' - - if arg.database == 'AEC': - arg.num_class = 28 - - if arg.database == 'WLASL': - - arg.num_class = 86 - - if arg.database == 'PUCP': - arg.num_class = 29 - arg.training_set_path = '../../DATASETS/PUCP_PSL_DGI156--'+arg.keypoints_model+'-Train.hdf5' - arg.testing_set_path = '../../DATASETS/PUCP_PSL_DGI156--'+arg.keypoints_model+'-Val.hdf5' - - arg.model_args['num_class'] =arg.num_class - arg.model_args['num_point'] =arg.keypoints_number - - arg.model_args['graph_args']['num_node'] =arg.keypoints_number - - #num_class: 28 # AEC=28, PUCP=36 , WLASL=101 - #num_point: 29 # 29 or 71 - - # arg.training_set_path - # arg.keypoints_model - # arg.keypoints_number - # arg.testing_set_path - # arg.experiment_name - # arg.base_lr - # arg.num_epoch - - - config = { - # - "num-epoch": arg.num_epoch, - "weight-decay": arg.weight_decay, - "batch-size":arg.batch_size, - "base-lr": arg.base_lr, - "kp-model": arg.keypoints_model, - "num_points": arg.keypoints_number, - "database": arg.database, - "mode_train":arg.mode_train, - "seed":arg.seed, - "id_iteration":id_iteration, - } - import wandb - import os - - os.environ["WANDB_API_KEY"] = "15f7c99e787e3f99da09963b0cfb45b73656845f" - - if wandbFlag: - wandb.init(project="sign_language_project", - entity="ml_projects", - reinit=True, - config=config) - - config = wandb.config - print('+'*10) - print('config :',config) - print('+'*10) - arg.base_lr = config["base-lr"] - arg.batch_size = config["batch-size"] - arg.weight_decay = config["weight-decay"] - arg.num_epoch = config["num-epoch"] - arg.kp_model = config["kp-model"] - arg.database = config["database"] - - arg.model_saved_directory = "save_models/"+arg.experiment_name+"/" - arg.work_dir = "work_dir/"+arg.experiment_name+"/" - - print('*'*20) - print('*'*20) - - print('model_saved_directory',arg.model_saved_directory) - print('work_dir',arg.work_dir) - - - create_folder(arg.model_saved_directory) - create_folder(arg.work_dir) - create_folder('./work_dir/' + arg.experiment_name + '/eval_results/'+ model_name+ '/') - - # {arg.model_saved_directory}-{arg.kp_model}-{arg.database}-Lr{str(arg.base_lr)}-NClasses{str(arg.num_class)}-{str(config['num_points'])} - #os.makedirs(arg.file_name,exist_ok=True) - - runAndModelName = arg.kp_model + '-' + arg.database +'-'+str(arg.keypoints_number)+ "-Lr" + str(arg.base_lr)+ "-NClas" + str(arg.num_class) + "-Batch" + str(arg.batch_size)+"-Seed"+str(arg.seed)+"-id"+str(id_iteration) - - model_name = runAndModelName - print('model_name : ',model_name) - if wandbFlag: - wandb.run.name = runAndModelName - wandb.run.save() - - - - print("*"*30) - print("*"*30) - print(arg) - print("*"*30) - print("*"*30) - print(arg.train_feeder_args) - print('train_feeder_args',arg.train_feeder_args) - processor = Processor(arg) - 
processor.start() - if wandbFlag: - wandb.finish() - print("wandb finish") diff --git a/model/__init__.py b/model/__init__.py deleted file mode 100644 index a8a7cbd..0000000 --- a/model/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from . import decouple_gcn_attn -from . import dropSke -from . import dropT \ No newline at end of file diff --git a/model/decouple_gcn_attn.py b/model/decouple_gcn_attn.py deleted file mode 100644 index 9f8bbae..0000000 --- a/model/decouple_gcn_attn.py +++ /dev/null @@ -1,284 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.autograd import Variable -import numpy as np -import math -from model.dropSke import DropBlock_Ske -from model.dropT import DropBlockT_1d - - -def import_class(name): - components = name.split('.') - mod = __import__(components[0]) - for comp in components[1:]: - mod = getattr(mod, comp) - return mod - - -def conv_branch_init(conv): - weight = conv.weight - n = weight.size(0) - k1 = weight.size(1) - k2 = weight.size(2) - nn.init.normal(weight, 0, math.sqrt(2. / (n * k1 * k2))) - nn.init.constant(conv.bias, 0) - - -def conv_init(conv): - nn.init.kaiming_normal(conv.weight, mode='fan_out') - nn.init.constant(conv.bias, 0) - - -def bn_init(bn, scale): - nn.init.constant(bn.weight, scale) - nn.init.constant(bn.bias, 0) - - -class unit_tcn(nn.Module): - def __init__(self, in_channels, out_channels, kernel_size=9, stride=1, num_point=25, block_size=41): - super(unit_tcn, self).__init__() - pad = int((kernel_size - 1) / 2) - self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), - stride=(stride, 1)) - - self.bn = nn.BatchNorm2d(out_channels) - self.relu = nn.ReLU() - conv_init(self.conv) - bn_init(self.bn, 1) - - self.dropS = DropBlock_Ske(num_point=num_point) - self.dropT = DropBlockT_1d(block_size=block_size) - - def forward(self, x, keep_prob, A): - x = self.bn(self.conv(x)) - x = self.dropT(self.dropS(x, keep_prob, A), keep_prob) - return x - - -class unit_tcn_skip(nn.Module): - def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): - super(unit_tcn_skip, self).__init__() - pad = int((kernel_size - 1) / 2) - self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), - stride=(stride, 1)) - - self.bn = nn.BatchNorm2d(out_channels) - self.relu = nn.ReLU() - conv_init(self.conv) - bn_init(self.bn, 1) - - def forward(self, x): - x = self.bn(self.conv(x)) - return x - - -class unit_gcn(nn.Module): - def __init__(self, in_channels, out_channels, A, groups, num_point, coff_embedding=4, num_subset=3): - super(unit_gcn, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.num_point = num_point - self.groups = groups - self.num_subset = num_subset - self.DecoupleA = nn.Parameter(torch.tensor(np.reshape(A.astype(np.float32), [ - 3, 1, num_point, num_point]), dtype=torch.float32, requires_grad=True).repeat(1, groups, 1, 1), requires_grad=True) - - if in_channels != out_channels: - self.down = nn.Sequential( - nn.Conv2d(in_channels, out_channels, 1), - nn.BatchNorm2d(out_channels) - ) - else: - self.down = lambda x: x - - self.bn0 = nn.BatchNorm2d(out_channels * num_subset) - self.bn = nn.BatchNorm2d(out_channels) - self.relu = nn.ReLU() - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - conv_init(m) - elif isinstance(m, nn.BatchNorm2d): - bn_init(m, 1) - bn_init(self.bn, 1e-6) - - self.Linear_weight = nn.Parameter(torch.zeros( - in_channels, out_channels * num_subset, 
requires_grad=True, device='cuda'), requires_grad=True) - nn.init.normal_(self.Linear_weight, 0, math.sqrt( - 0.5 / (out_channels * num_subset))) - - self.Linear_bias = nn.Parameter(torch.zeros( - 1, out_channels * num_subset, 1, 1, requires_grad=True, device='cuda'), requires_grad=True) - nn.init.constant(self.Linear_bias, 1e-6) - - eye_array = [] - for i in range(out_channels): - eye_array.append(torch.eye(num_point)) - self.eyes = nn.Parameter(torch.tensor(torch.stack( - eye_array), requires_grad=False, device='cuda'), requires_grad=False) # [c,25,25] - - def norm(self, A): - b, c, h, w = A.size() - A = A.view(c, self.num_point, self.num_point) - D_list = torch.sum(A, 1).view(c, 1, self.num_point) - D_list_12 = (D_list + 0.001)**(-1) - D_12 = self.eyes * D_list_12 - A = torch.bmm(A, D_12).view(b, c, h, w) - return A - - def forward(self, x0): - learn_A = self.DecoupleA.repeat( - 1, self.out_channels // self.groups, 1, 1) - norm_learn_A = torch.cat([self.norm(learn_A[0:1, ...]), self.norm( - learn_A[1:2, ...]), self.norm(learn_A[2:3, ...])], 0) - - x = torch.einsum( - 'nctw,cd->ndtw', (x0, self.Linear_weight)).contiguous() - x = x + self.Linear_bias - x = self.bn0(x) - - n, kc, t, v = x.size() - x = x.view(n, self.num_subset, kc // self.num_subset, t, v) - x = torch.einsum('nkctv,kcvw->nctw', (x, norm_learn_A)) - - x = self.bn(x) - x += self.down(x0) - x = self.relu(x) - return x - - -class TCN_GCN_unit(nn.Module): - def __init__(self, in_channels, out_channels, A, groups, num_point, block_size, stride=1, residual=True, attention=True): - super(TCN_GCN_unit, self).__init__() - num_jpts = A.shape[-1] - self.gcn1 = unit_gcn(in_channels, out_channels, A, groups, num_point) - self.tcn1 = unit_tcn(out_channels, out_channels, - stride=stride, num_point=num_point) - self.relu = nn.ReLU() - - self.A = nn.Parameter(torch.tensor(np.sum(np.reshape(A.astype(np.float32), [ - 3, num_point, num_point]), axis=0), dtype=torch.float32, requires_grad=False, device='cuda'), requires_grad=False) - - if not residual: - self.residual = lambda x: 0 - - elif (in_channels == out_channels) and (stride == 1): - self.residual = lambda x: x - - else: - self.residual = unit_tcn_skip( - in_channels, out_channels, kernel_size=1, stride=stride) - self.dropSke = DropBlock_Ske(num_point=num_point) - self.dropT_skip = DropBlockT_1d(block_size=block_size) - self.attention = attention - if attention: - print('Attention Enabled!') - self.sigmoid = nn.Sigmoid() - # temporal attention - self.conv_ta = nn.Conv1d(out_channels, 1, 9, padding=4) - nn.init.constant_(self.conv_ta.weight, 0) - nn.init.constant_(self.conv_ta.bias, 0) - # s attention - ker_jpt = num_jpts - 1 if not num_jpts % 2 else num_jpts - pad = (ker_jpt - 1) // 2 - self.conv_sa = nn.Conv1d(out_channels, 1, ker_jpt, padding=pad) - nn.init.xavier_normal_(self.conv_sa.weight) - nn.init.constant_(self.conv_sa.bias, 0) - # channel attention - rr = 2 - self.fc1c = nn.Linear(out_channels, out_channels // rr) - self.fc2c = nn.Linear(out_channels // rr, out_channels) - nn.init.kaiming_normal_(self.fc1c.weight) - nn.init.constant_(self.fc1c.bias, 0) - nn.init.constant_(self.fc2c.weight, 0) - nn.init.constant_(self.fc2c.bias, 0) - - def forward(self, x, keep_prob): - y = self.gcn1(x) - if self.attention: - # spatial attention - se = y.mean(-2) # N C V - se1 = self.sigmoid(self.conv_sa(se)) - y = y * se1.unsqueeze(-2) + y - # a1 = se1.unsqueeze(-2) - - # temporal attention - se = y.mean(-1) - se1 = self.sigmoid(self.conv_ta(se)) - y = y * se1.unsqueeze(-1) + y - # a2 = 
se1.unsqueeze(-1) - - # channel attention - se = y.mean(-1).mean(-1) - se1 = self.relu(self.fc1c(se)) - se2 = self.sigmoid(self.fc2c(se1)) - y = y * se2.unsqueeze(-1).unsqueeze(-1) + y - # a3 = se2.unsqueeze(-1).unsqueeze(-1) - - y = self.tcn1(y, keep_prob, self.A) - x_skip = self.dropT_skip(self.dropSke(self.residual(x), keep_prob, self.A), keep_prob) - return self.relu(y + x_skip) - - -class Model(nn.Module): - def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=2): - super(Model, self).__init__() - - if graph is None: - raise ValueError() - else: - Graph = import_class(graph) - self.graph = Graph(**graph_args) - self.graph.num_node = num_point - - A = self.graph.A - self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) - - self.l1 = TCN_GCN_unit(in_channels, 64, A, groups, num_point, - block_size, residual=False) - self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) - self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) - self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) - self.l5 = TCN_GCN_unit( - 64, 128, A, groups, num_point, block_size, stride=2) - self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) - self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) - self.l8 = TCN_GCN_unit(128, 256, A, groups, - num_point, block_size, stride=2) - self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) - self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) - - self.fc = nn.Linear(256, num_class) - nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) - bn_init(self.data_bn, 1) - - def forward(self, x, keep_prob=0.9): - N, C, T, V, M = x.size() - x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) - x = self.data_bn(x) - x = x.view(N, M, V, C, T).permute( - 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) - - x = self.l1(x, 1.0) - x = self.l2(x, 1.0) - x = self.l3(x, 1.0) - x = self.l4(x, 1.0) - x = self.l5(x, 1.0) - x = self.l6(x, 1.0) - x = self.l7(x, keep_prob) - x = self.l8(x, keep_prob) - x = self.l9(x, keep_prob) - x = self.l10(x, keep_prob) - - # N*M,C,T,V - c_new = x.size(1) - - # print(x.size()) - # print(N, M, c_new) - - # x = x.view(N, M, c_new, -1) - x = x.reshape(N, M, c_new, -1) - x = x.mean(3).mean(1) - - return self.fc(x) diff --git a/model/dropSke.py b/model/dropSke.py deleted file mode 100644 index c18456a..0000000 --- a/model/dropSke.py +++ /dev/null @@ -1,36 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn -import warnings - - -class DropBlock_Ske(nn.Module): - def __init__(self, num_point, block_size=7): - super(DropBlock_Ske, self).__init__() - self.keep_prob = 0.0 - self.block_size = block_size - self.num_point = num_point - - def forward(self, input, keep_prob, A): # n,c,t,v - self.keep_prob = keep_prob - if not self.training or self.keep_prob == 1: - return input - n, c, t, v = input.size() - - input_abs = torch.mean(torch.mean( - torch.abs(input), dim=2), dim=1).detach() - input_abs = input_abs / torch.sum(input_abs) * input_abs.numel() - if self.num_point == 25: # Kinect V2 - gamma = (1. - self.keep_prob) / (1 + 1.92) - elif self.num_point == 20: # Kinect V1 - gamma = (1. - self.keep_prob) / (1 + 1.9) - else: - gamma = (1. 
- self.keep_prob) / (1 + 1.92) - warnings.warn('undefined skeleton graph') - M_seed = torch.bernoulli(torch.clamp( - input_abs * gamma, max=1.0)).to(device=input.device, dtype=input.dtype) - M = torch.matmul(M_seed, A) - M[M > 0.001] = 1.0 - M[M < 0.5] = 0.0 - mask = (1 - M).view(n, 1, 1, self.num_point) - return input * mask * mask.numel() / mask.sum() diff --git a/model/dropT.py b/model/dropT.py deleted file mode 100644 index 6ed1167..0000000 --- a/model/dropT.py +++ /dev/null @@ -1,24 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -class DropBlockT_1d(nn.Module): - def __init__(self, block_size=7): - super(DropBlockT_1d, self).__init__() - self.keep_prob = 0.0 - self.block_size = block_size - - def forward(self, input, keep_prob): - self.keep_prob = keep_prob - if not self.training or self.keep_prob == 1: - return input - n,c,t,v = input.size() - - input_abs = torch.mean(torch.mean(torch.abs(input),dim=3),dim=1).detach() - input_abs = (input_abs/torch.sum(input_abs)*input_abs.numel()).view(n,1,t) - gamma = (1. - self.keep_prob) / self.block_size - input1 = input.permute(0,1,3,2).contiguous().view(n,c*v,t) - M = torch.bernoulli(torch.clamp(input_abs * gamma, max=1.0)).repeat(1,c*v,1) - Msum = F.max_pool1d(M, kernel_size=[self.block_size], stride=1, padding=self.block_size // 2) - mask = (1 - Msum).to(device=input.device, dtype=input.dtype) - return (input1 * mask * mask.numel() /mask.sum()).view(n,c,v,t).permute(0,1,3,2) diff --git a/points_51.csv b/points_51.csv deleted file mode 100644 index 1cd9e02..0000000 --- a/points_51.csv +++ /dev/null @@ -1,52 +0,0 @@ -tar_name,ori_name,mp_pos,wb_pos,op_pos,origin,tarjet -pose_nose,pose_nose,1,1,1,1,1 -pose_left_eye,pose_nose,3,2,17,1,2 -pose_right_eye,pose_nose,6,3,16,1,3 -pose_left_shoulder,pose_nose,12,6,6,1,4 -pose_right_shoulder,pose_nose,13,7,3,1,5 -pose_left_elbow,pose_left_shoulder,14,8,7,4,6 -pose_right_elbow,pose_right_shoulder,15,9,4,5,7 -pose_left_wrist,pose_left_elbow,16,10,8,6,8 -pose_right_wrist,pose_right_elbow,17,11,5,7,9 -face_right_mouth_up,pose_nose,71,74,76,1,10 -face_right_eyebrow_inner,pose_nose,89,45,47,1,11 -face_right_mouth_corner,face_right_mouth_up,91,72,74,10,12 -face_right_eyebrow_outer,face_right_eyebrow_middle,104,41,43,15,13 -face_right_mouth_down,face_right_mouth_corner,118,80,82,12,14 -face_right_eyebrow_middle,face_right_eyebrow_inner,139,43,45,11,15 -face_left_mouth_up,pose_nose,301,76,78,1,16 -face_left_eyebrow_inner,pose_nose,319,46,48,1,17 -face_left_mouth_corner,face_left_mouth_up,321,78,80,16,18 -face_left_eyebrow_outer,face_left_eyebrow_middle,334,50,52,21,19 -face_left_mouth_down,face_left_mouth_corner,348,82,84,18,20 -face_left_eyebrow_middle,face_left_eyebrow_inner,368,48,50,17,21 -leftHand_thumb_cmc,pose_left_wrist,503,93,97,8,22 -leftHand_thumb_mcp,leftHand_thumb_cmc,504,94,98,22,23 -leftHand_thumb_tip,leftHand_thumb_ip,506,96,100,23,24 -leftHand_index_finger_mcp,pose_left_wrist,507,97,101,8,25 -leftHand_index_finger_pip,leftHand_index_finger_mcp,508,98,102,25,26 -leftHand_index_finger_tip,leftHand_index_finger_dip,510,100,104,26,27 -leftHand_middle_finger_mcp,pose_left_wrist,511,101,105,8,28 -leftHand_middle_finger_pip,leftHand_middle_finger_mcp,512,102,106,28,29 -leftHand_middle_finger_tip,leftHand_middle_finger_dip,514,104,108,29,30 -leftHand_ring_finger_mcp,pose_left_wrist,515,105,109,8,31 -leftHand_ring_finger_pip,leftHand_ring_finger_mcp,516,106,110,31,32 -leftHand_ring_finger_tip,leftHand_ring_finger_dip,518,108,112,32,33 
-leftHand_pinky_mcp,pose_left_wrist,519,109,113,8,34 -leftHand_pinky_pip,leftHand_pinky_mcp,520,110,114,34,35 -leftHand_pinky_tip,leftHand_pinky_dip,522,112,116,35,36 -rightHand_thumb_cmc,pose_right_wrist,524,114,118,9,37 -rightHand_thumb_mcp,rightHand_thumb_cmc,525,115,119,37,38 -rightHand_thumb_tip,rightHand_thumb_ip,527,117,121,38,39 -rightHand_index_finger_mcp,pose_right_wrist,528,118,122,9,40 -rightHand_index_finger_pip,rightHand_index_finger_mcp,529,119,123,40,41 -rightHand_index_finger_tip,rightHand_index_finger_dip,531,121,125,41,42 -rightHand_middle_finger_mcp,pose_right_wrist,532,122,126,9,43 -rightHand_middle_finger_pip,rightHand_middle_finger_mcp,533,123,127,43,44 -rightHand_middle_finger_tip,rightHand_middle_finger_dip,535,125,129,44,45 -rightHand_ring_finger_mcp,pose_right_wrist,536,126,130,9,46 -rightHand_ring_finger_pip,rightHand_ring_finger_mcp,537,127,131,46,47 -rightHand_ring_finger_tip,rightHand_ring_finger_dip,539,129,133,47,48 -rightHand_pinky_mcp,pose_right_wrist,540,130,134,9,49 -rightHand_pinky_pip,rightHand_pinky_mcp,541,131,135,49,50 -rightHand_pinky_tip,rightHand_pinky_dip,543,133,137,50,51 diff --git a/runModel.sh b/runModel.sh deleted file mode 100644 index 967501d..0000000 --- a/runModel.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -#python main.py --config config/sign/train/train_joint.yaml - #num_class: 28 # AEC=28, PUCP=29 , WLASL=86 - #num_point: 29 # 29 or 71 - -declare -a points=(29 71 29 71 29 71) -declare -a lrs=(0.05 0.1 0.05 0.1 0.1 0.05) -declare -a datasets=("AEC" "AEC" "PUCP" "PUCP" "WLASL" "WLASL") - -for i in 1 -do - for j in 0 1 2 3 4 5 - do - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train numero_parametros - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train numero_parametros --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train numero_parametros --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - done -done - -""" - -for i in 5 15 25 35 45 55 65 75 85 95 -do - for j in 0 1 2 3 4 5 - do - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion_3 - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion_3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - python main.py --seed $i --experiment_name 
"results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train fundamentacion_3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - done -done -""" diff --git a/runModelTest.sh b/runModelTest.sh deleted file mode 100644 index d0558f6..0000000 --- a/runModelTest.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -######################################################### -#python main.py --config config/sign/train/train_joint.yaml - #num_class: 28 # AEC=28, PUCP=29 , WLASL=86 - #num_point: 29 # 29 or 71 or 51 - -declare -a points=(51 51 51) -declare -a lrs=(0.05 0.05 0.05) -declare -a datasets=("PUCP" "AEC" "WLASL") - - - -for i in 1 -do - for j in 0 1 2 - do - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 3 --mode_train cris_51points_v1 - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 3 --mode_train cris_51points_v1 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 3 --mode_train cris_51points_v1 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - done -done - From bdf732053ffe8567ae1c9a61fd2f9d9ab58fac56 Mon Sep 17 00:00:00 2001 From: Chameleon Cloud User Date: Fri, 28 Oct 2022 03:24:23 +0000 Subject: [PATCH 36/56] lower number of parameters in the model --- SL-GCN/config/sign/train/train_joint.yaml | 20 ++-- SL-GCN/feeders/feeder.py | 8 +- SL-GCN/main.py | 139 ++++++++++++++++------ SL-GCN/model/decouple_gcn_attn.py | 34 +++--- SL-GCN/wandbFunctions.py | 2 +- 5 files changed, 136 insertions(+), 67 deletions(-) diff --git a/SL-GCN/config/sign/train/train_joint.yaml b/SL-GCN/config/sign/train/train_joint.yaml index f38dd4c..fe9ea20 100644 --- a/SL-GCN/config/sign/train/train_joint.yaml +++ b/SL-GCN/config/sign/train/train_joint.yaml @@ -10,7 +10,7 @@ train_feeder_args: random_choose: True window_size: 100 random_shift: True - normalization: True + normalization: True # Set to false because the data is normalized random_mirror: True random_mirror_p: 0.5 is_vector: False @@ -35,15 +35,15 @@ model_args: #optim weight_decay: 0.0001 -base_lr: 0.1 -step: [150, 200] +base_lr: 0.005 +step: [] #[50, 100, 150, 200] # To modify the learning rate => lr * 0.1**Sum(x :-> epoch > step) # training -device: [0,1] -keep_rate: 0.9 -only_train_epoch: 1 -batch_size: 32 +device: [0,1] # Cuda devices +keep_rate: 0.9 # mostly for Gamma in torch.bernoulli +only_train_epoch: 1 # To set req_grad to false for all "DecoupleA" parameters only for this epoch +batch_size: 128 test_batch_size: 64 -num_epoch: 250 -nesterov: True -warm_up_epoch: 20 +num_epoch: 2000 +nesterov: True # SGD +warm_up_epoch: 0 # To modify the learning rate => lr * 
(epoch+1)/warm_up_epoch diff --git a/SL-GCN/feeders/feeder.py b/SL-GCN/feeders/feeder.py index 6323253..15e4ddb 100644 --- a/SL-GCN/feeders/feeder.py +++ b/SL-GCN/feeders/feeder.py @@ -101,7 +101,7 @@ def __getitem__(self, index): assert data_numpy.shape[2] == 71 or data_numpy.shape[2] == 29 data_numpy = data_numpy[:,:,flip_index[data_numpy.shape[2]],:] if self.is_vector: - data_numpy[0,:,:,:] = - data_numpy[0,:,:,:] + data_numpy[0,:,:,:] = 1 - data_numpy[0,:,:,:] else: data_numpy[0,:,:,:] = 1 - data_numpy[0,:,:,:] #print("dabe after random mirror", data_numpy) @@ -109,7 +109,7 @@ def __getitem__(self, index): if self.normalization: # data_numpy = (data_numpy - self.mean_map) / self.std_map assert data_numpy.shape[0] == 2 - #print("dabe before norm", data_numpy) + #print("dabe before norm", data_numpy.shape) if self.is_vector: data_numpy[0,:,0,:] = data_numpy[0,:,0,:] - data_numpy[0,:,0,0].mean(axis=0) data_numpy[1,:,0,:] = data_numpy[1,:,0,:] - data_numpy[1,:,0,0].mean(axis=0) @@ -121,8 +121,8 @@ def __getitem__(self, index): #print("dabe before shift", data_numpy) if self.is_vector: - data_numpy[0,:,0,:] += random.random() * 20 - 10.0 - data_numpy[1,:,0,:] += random.random() * 20 - 10.0 + data_numpy[0,:,0,:] += random.random()/25 # * 20 - 10.0 + data_numpy[1,:,0,:] += random.random()/25 # * 20 - 10.0 else: data_numpy[0,:,:,:] += random.random()/25 #random.random() * 20 - 10.0 data_numpy[1,:,:,:] += random.random()/25 #random.random() * 20 - 10.0 diff --git a/SL-GCN/main.py b/SL-GCN/main.py index 3548aeb..7c33fb6 100644 --- a/SL-GCN/main.py +++ b/SL-GCN/main.py @@ -25,9 +25,15 @@ import wandbFunctions as wandbF import wandb import time + +# Import datetime class from datetime module +from datetime import datetime + from data_gen.getConnectingPoint import * wandbFlag = True +now = str(datetime.now()) +now = now.replace(':','-').replace(" ","_").split('.')[0] # class LabelSmoothingCrossEntropy(nn.Module): # def __init__(self): @@ -105,7 +111,7 @@ def get_parser(): # optim parser.add_argument('--base_lr', type=float, default=0.05, help='initial learning rate') - parser.add_argument('--num_epoch',type=int,default=500,help='stop training in which epoch') + parser.add_argument('--num_epoch',type=int,default=5,help='stop training in which epoch') parser.add_argument('--step',type=int,default=[20, 40, 60],nargs='+',help='the epoch where optimizer reduce the learning rate') parser.add_argument('--device',type=int,default=0,nargs='+',help='the indexes of GPUs for training or testing') @@ -131,9 +137,11 @@ def get_parser(): parser.add_argument("--num_class", type=int, default=0, help="Path to the testing dataset CSV file") parser.add_argument("--database", type=str, default="", help="Path to the testing dataset CSV file") parser.add_argument("--mode_train", type=str, default="train", help="Path to the testing dataset CSV file") + parser.add_argument('--cleaned', type=bool, default=False, help='use nesterov or not') return parser - +def count_parameters(model): + return sum(p.numel() for p in model.parameters() if p.requires_grad) class Processor(): """ @@ -170,6 +178,7 @@ def __init__(self, arg): self.lr = self.arg.base_lr self.best_acc = 0 self.best_tmp_acc = 0 + self.best_loss = 1000 self.maxTestAcc = 0 self.relative_maxtop5 = 0 @@ -209,7 +218,7 @@ def load_data(self): Feeder = import_class(self.arg.feeder) ln = Feeder(**self.arg.test_feeder_args) self.meaning = ln.meaning - pd.DataFrame(list(self.meaning.values())).to_csv('./work_dir/' + self.arg.experiment_name + '/eval_results/'+ model_name+ 
'/'+'labels_used.csv',header=None) + pd.DataFrame(list(self.meaning.values())).to_csv('./work_dir/' + self.arg.experiment_name + '/eval_results/'+ model_name+ '/'+now+'/'+'labels_used.csv',header=None) #print(ln.meaning) self.data_loader = dict() if self.arg.phase == 'train': @@ -243,8 +252,19 @@ def load_model(self): shutil.copy2(inspect.getfile(Model), self.arg.work_dir) self.model = Model(**self.arg.model_args).cuda(output_device) # print(self.model) + + print('#'*60) + print('#'*40) + print('#'*20) + print("Trainable params:", count_parameters(self.model)) + print('#'*20) + print('#'*40) + print('#'*60) + + if wandbFlag: wandbF.watch(self.model) + self.loss = nn.CrossEntropyLoss().cuda(output_device) path_model_init = os.path.join(arg.model_saved_directory,arg.kp_model + '-' + arg.database +'-'+str(arg.keypoints_number)+ "-"+str(arg.seed)+"-init.pt") @@ -258,8 +278,6 @@ def load_model(self): self.m_params = sum(p.numel() for p in self.model.parameters()) self.trainable_m_params= sum(p.numel() for p in self.model.parameters() if p.requires_grad) - - # self.loss = LabelSmoothingCrossEntropy().cuda(output_device) @@ -363,7 +381,7 @@ def save_arg(self): if not os.path.exists(self.arg.work_dir): os.makedirs(self.arg.work_dir) os.makedirs(self.arg.work_dir + '/eval_results') - os.makedirs(self.arg.work_dir + '/eval_results/'+ model_name, exist_ok = True) + os.makedirs(self.arg.work_dir + '/eval_results/'+ model_name + '/' + now, exist_ok = True) with open('{}/config.yaml'.format(self.arg.work_dir), 'w') as f: yaml.dump(arg_dict, f) @@ -371,11 +389,14 @@ def save_arg(self): def adjust_learning_rate(self, epoch): if self.arg.optimizer == 'SGD' or self.arg.optimizer == 'Adam': + # to modify base_lr before warm_up_epoch if epoch < self.arg.warm_up_epoch: lr = self.arg.base_lr * (epoch + 1) / self.arg.warm_up_epoch else: + # to modify base_lr depending of arg.step values (arg.step is list of values) lr = self.arg.base_lr * ( 0.1 ** np.sum(epoch >= np.array(self.arg.step))) + for param_group in self.optimizer.param_groups: param_group['lr'] = lr return lr @@ -683,7 +704,9 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r if x != true[i] and wrong_file is not None: f_w.write(str(index[i]) + ',' + str(x) + ',' + str(true[i]) + '\n') + score = np.concatenate(score_frag) + epoch_loss = np.mean(loss_value) if 'UCLA' in arg.experiment_name: self.data_loader[ln].dataset.sample_name = np.arange( @@ -692,6 +715,34 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r accuracy = self.data_loader[ln].dataset.top_k(score, 1) top5 = self.data_loader[ln].dataset.top_k(score, 5) + if epoch_loss < self.best_loss: + self.best_loss = epoch_loss + + class_acc = {element:[] for element in meaning} + totalRows = 0 + with open('./work_dir/' + self.arg.experiment_name + '/eval_results/'+ model_name+ '/'+ now + '/loss-submission.csv', 'w',) as of: + writer = csv.writer(of) + accum = 0 + for trueName, truePred in trueLabels.items(): + + sample = trueName + #print(f'Predicting {sample}', end=' ') + #print(f'as {submission[sample]} - pred {submission[sample]} and real {row[1]}') + match=0 + if int(truePred) == int(submission[sample]): + match=1 + accum+=1 + totalRows+=1 + class_acc[saveLabels[sample]].append(match) + + writer.writerow([sample, saveLabels[sample], submission[sample], str(truePred), str(match)]) + + class_acc = [(k,sum(v),len(v),sum(v)/len(v)) for k,v in class_acc.items()] + + pd.DataFrame(class_acc, columns=["gloss","prediction","true 
label","acc"]).to_csv('./work_dir/' + self.arg.experiment_name + '/eval_results/'+ model_name + '/'+ now + '/loss-submission.csv',index=None) + + + if accuracy > self.best_acc: self.best_acc = accuracy @@ -733,7 +784,7 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r #size_arr = df_cm.sum(axis = 1) #maxi = max(size_arr) - group_percentages = ["{0:.1%}".format(value) for value in confusion_matrix.flatten()] + group_percentages = ["{0:.1%}".format(_value) for _value in confusion_matrix.flatten()] annot = ["{1}".format(v2, v1) for v1, v2 in zip(group_counts, group_percentages)] annot = np.asarray(annot).reshape(self.arg.model_args["num_class"], self.arg.model_args["num_class"]) @@ -751,10 +802,10 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r print('*'*20) print('*'*20) - print('./work_dir/' + arg.experiment_name + '/eval_results/'+ model_name+ '/best_acc' + '.pkl') + print('./work_dir/' + arg.experiment_name + '/eval_results/'+ model_name + '/'+ now + '/best_acc' + '.pkl') - with open('./work_dir/' + arg.experiment_name + '/eval_results/'+ model_name+ '/best_acc' + '.pkl'.format( + with open('./work_dir/' + arg.experiment_name + '/eval_results/'+ model_name + '/'+ now + '/best_acc' + '.pkl'.format( epoch, accuracy), 'wb') as f: pickle.dump(score_dict, f) @@ -767,13 +818,12 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r print('*'*20) print('*'*20) print(self.arg.model_saved_directory) - print(self.arg.model_saved_directory + '-' + arg.kp_model + '-' + arg.database + "-Lr" + str(arg.base_lr) + "-NClasses" + str(arg.model_args["num_class"]) + '-' + str(accuracy) + '.pt') - torch.save(weights, self.arg.model_saved_directory + '-' + arg.kp_model + '-' + arg.database + "-Lr" + str(arg.base_lr) + "-NClasses" + str(arg.model_args["num_class"]) + '-' + str(accuracy) + '.pt') + print(self.arg.model_saved_directory + arg.kp_model + '-' + arg.database + "-Lr" + str(arg.base_lr) + "-NClasses" + str(arg.model_args["num_class"]) + '-' + str(accuracy) + '.pt') + torch.save(weights, self.arg.model_saved_directory + arg.kp_model + '-' + arg.database + "-Lr" + str(arg.base_lr) + "-NClasses" + str(arg.model_args["num_class"]) + '-' + str(accuracy) + '.pt') - predLabels = [] - groundLabels = [] + class_acc = {element:[] for element in meaning} totalRows = 0 - with open('./work_dir/' + self.arg.experiment_name + '/eval_results/'+ model_name+ '/'+'submission.csv', 'w',) as of: + with open('./work_dir/' + self.arg.experiment_name + '/eval_results/'+ model_name+ '/'+ now + '/acc-submission.csv', 'w',) as of: writer = csv.writer(of) accum = 0 for trueName, truePred in trueLabels.items(): @@ -782,15 +832,17 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r #print(f'Predicting {sample}', end=' ') #print(f'as {submission[sample]} - pred {submission[sample]} and real {row[1]}') match=0 - predLabels.append(submission[sample]) - groundLabels.append(int(truePred)) if int(truePred) == int(submission[sample]): match=1 accum+=1 totalRows+=1 + class_acc[saveLabels[sample]].append(match) writer.writerow([sample, saveLabels[sample], submission[sample], str(truePred), str(match)]) + class_acc = [(k,sum(v),len(v),sum(v)/len(v)) for k,v in class_acc.items()] + + pd.DataFrame(class_acc, columns=["gloss","prediction","true label","acc"]).to_csv('./work_dir/' + self.arg.experiment_name + '/eval_results/'+ model_name + '/'+ now + '/acc-submission_acc.csv',index=None) if epoch + 1 == 
arg.num_epoch: @@ -804,8 +856,10 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r labels=meaning, classes_to_plot=None)}) except: pass + #wandb.log({"val_sklearn_conf_mat": wandb.sklearn.plot_confusion_matrix(, # , meaning_3)}) + ''' wandb.log({"VAL_conf_mat" : wandb.plot.confusion_matrix( #probs=score, @@ -818,12 +872,12 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r print('Eval Accuracy: ', accuracy, ' model: ', self.arg.model_saved_directory) if wandbFlag: - mean_loss = np.mean(loss_value) - if mean_loss>10: + mean_loss = epoch_loss + if mean_loss > 10: mean_loss = 10 self.maxTestAcc = max(accuracy,self.maxTestAcc) - + if self.maxTestAcc == accuracy: self.relative_maxtop5 = top5 @@ -832,8 +886,10 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r score_dict = dict( zip(self.data_loader[ln].dataset.sample_name, score)) + self.print_log('\tMean {} loss of {} batches: {}.'.format( - ln, len(self.data_loader[ln]), np.mean(loss_value))) + ln, len(self.data_loader[ln]), epoch_loss)) + for k in self.arg.show_topk: self.print_log('\tTop{}: {:.2f}%'.format( k, 100 * self.data_loader[ln].dataset.top_k(score, k))) @@ -844,7 +900,7 @@ def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, r ''' print("END") - return np.mean(loss_value) + return epoch_loss def start(self): @@ -919,9 +975,6 @@ def import_class(name): for id_iteration in range(1): # load arg form config file - - - print('arg.config',arg.config) if arg.config is not None: with open(arg.config, 'r') as f: @@ -937,10 +990,18 @@ def import_class(name): # load arg form config file arg = parser.parse_args() + print(arg.cleaned) + if arg.cleaned: + print("cleaned", "="*10) + arg.training_set_path = '../../ConnectingPoints/split/cleaned/'+arg.database+'--'+arg.keypoints_model+'-Train.hdf5' + arg.testing_set_path = '../../ConnectingPoints/split/cleaned/'+arg.database+'--'+arg.keypoints_model+'-Val.hdf5' + else: + print("complete", "$"*10) + arg.training_set_path = '../../ConnectingPoints/split/'+arg.database+'--'+arg.keypoints_model+'-Train.hdf5' + arg.testing_set_path = '../../ConnectingPoints/split/'+arg.database+'--'+arg.keypoints_model+'-Val.hdf5' - arg.training_set_path = '../../ConnectingPoints/split/'+arg.database+'--'+arg.keypoints_model+'-Train.hdf5' - arg.testing_set_path = '../../ConnectingPoints/split/'+arg.database+'--'+arg.keypoints_model+'-Val.hdf5' + # DATABASE ARG if arg.database == 'AEC': arg.num_class = 28 @@ -953,7 +1014,7 @@ def import_class(name): arg.testing_set_path = '../../../PUCP_PSL_DGI156--'+arg.keypoints_model+'-Val.hdf5' if arg.database == 'AEC-DGI156-DGI305': - arg.num_class = 72 + arg.num_class = 70 arg.model_args['num_class'] =arg.num_class arg.model_args['num_point'] =arg.keypoints_number @@ -978,6 +1039,8 @@ def import_class(name): "weight-decay": arg.weight_decay, "batch-size":arg.batch_size, "base-lr": arg.base_lr, + "step": arg.step, + "warm_up_epoch": arg.warm_up_epoch, "kp-model": arg.keypoints_model, "num_points": arg.keypoints_number, "database": arg.database, @@ -988,18 +1051,27 @@ def import_class(name): import wandb import os - os.environ["WANDB_API_KEY"] = "" + os.environ["WANDB_API_KEY"] = "9c7a2412b1f242359f1a4915b620f578b32e96ac" if wandbFlag: - wandb.init(project="", - entity="", - tags=["to_compare","complete_data"], + if arg.cleaned: + wandb.init(project="three-datasets-psl", + entity="JoeNatan30", + tags=["cleaned_data","dist_dur_ban","model_mod"], + reinit=True, 
+ config=config) + else: + wandb.init(project="three-datasets-psl", + entity="JoeNatan30", + tags=["complete_data","dist_dur_ban","model_mod"], reinit=True, config=config) config = wandb.config print('+'*10) print('config :',config) + + print('change learning in step: ',arg.step, "and (warm_up_epoch)", arg.warm_up_epoch) print('+'*10) arg.base_lr = config["base-lr"] arg.batch_size = config["batch-size"] @@ -1008,7 +1080,7 @@ def import_class(name): arg.kp_model = config["kp-model"] arg.database = config["database"] - arg.model_saved_directory = "save_models/"+arg.experiment_name+"/" + arg.model_saved_directory = "save_models/"+arg.experiment_name+"/"+now+"/" arg.work_dir = "work_dir/"+arg.experiment_name +"/" print('*'*20) @@ -1031,11 +1103,10 @@ def import_class(name): model_name = runAndModelName print('model_name : ',model_name) if wandbFlag: - wandb.run.name = runAndModelName + wandb.run.name = runAndModelName + "-" + now wandb.run.save() - print("*"*30) print("*"*30) print(arg) diff --git a/SL-GCN/model/decouple_gcn_attn.py b/SL-GCN/model/decouple_gcn_attn.py index 9f8bbae..800552c 100644 --- a/SL-GCN/model/decouple_gcn_attn.py +++ b/SL-GCN/model/decouple_gcn_attn.py @@ -237,17 +237,15 @@ def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_siz self.l1 = TCN_GCN_unit(in_channels, 64, A, groups, num_point, block_size, residual=False) - self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) - self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) - self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) - self.l5 = TCN_GCN_unit( - 64, 128, A, groups, num_point, block_size, stride=2) - self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) - self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) - self.l8 = TCN_GCN_unit(128, 256, A, groups, - num_point, block_size, stride=2) - self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) - self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + #self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + #self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + #self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) + self.l5 = TCN_GCN_unit(64, 128, A, groups, num_point, block_size, stride=2) + #self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + #self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) + self.l8 = TCN_GCN_unit(128, 256, A, groups,num_point, block_size, stride=2) + #self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) + #self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) self.fc = nn.Linear(256, num_class) nn.init.normal(self.fc.weight, 0, math.sqrt(2. 
/ num_class)) @@ -261,15 +259,15 @@ def forward(self, x, keep_prob=0.9): 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) x = self.l1(x, 1.0) - x = self.l2(x, 1.0) - x = self.l3(x, 1.0) - x = self.l4(x, 1.0) + #x = self.l2(x, 1.0) + #x = self.l3(x, 1.0) + #x = self.l4(x, 1.0) x = self.l5(x, 1.0) - x = self.l6(x, 1.0) - x = self.l7(x, keep_prob) + #x = self.l6(x, 1.0) + #x = self.l7(x, keep_prob) x = self.l8(x, keep_prob) - x = self.l9(x, keep_prob) - x = self.l10(x, keep_prob) + #x = self.l9(x, keep_prob) + #x = self.l10(x, keep_prob) # N*M,C,T,V c_new = x.size(1) diff --git a/SL-GCN/wandbFunctions.py b/SL-GCN/wandbFunctions.py index 4b33c6e..c38c246 100644 --- a/SL-GCN/wandbFunctions.py +++ b/SL-GCN/wandbFunctions.py @@ -33,7 +33,7 @@ def wandbTrainLog(trainLoss, TrainAcc,p1,p2): "Train accuracy": TrainAcc, "m_params":p1, "trainable_m_params":p2 - }) + }, commit=False) def wandbValLog(testLoss, TestAcc, top5,maxTestAcc,relative_maxtop5): wandb.log({"Val Loss": testLoss, From 559670f9af187b3a3ab7c6651973092f44cc1ecf Mon Sep 17 00:00:00 2001 From: Chameleon Cloud User Date: Sun, 30 Oct 2022 06:25:39 +0000 Subject: [PATCH 37/56] readme s --- SL-GCN/readme.md | 25 ++++++++++++++++++++----- readme.md | 9 +++++++++ 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/SL-GCN/readme.md b/SL-GCN/readme.md index aac02b7..990b695 100644 --- a/SL-GCN/readme.md +++ b/SL-GCN/readme.md @@ -1,11 +1,26 @@ # Skeleton Based Sign Language Recognition +We prepare this runModel.sh file to run the model. - ``` -bash runModel.sh +Note: If you do not use WandB please change to False "wandbFlag" variable value in main.py (line 34) -``` +## main.py -Note: if you don't have a wandb account, you need to set "wandbFlag" variable of "main.py" to False and modify the code to have reports +* --seed => set the seed to train [used 5, 15, 25, 35 and 45] +* --experiment_name => The name where the model will be saved +* --database => part of the name of the HDF5 file that will be used to retrieve the imput data (e.x: AEC, PUCP-DGI156, WLASL) +* --keypoints_model => part of the name of the HDF5 file that will be used to retrieve the imput data (e.x: mediapipe, wholepose, openpose) +* --base_lr => learning rate +* --keypoints_number => number of keypoints used (29 or 71) +* --num_epoch => number of epochs +* --mode_train => to show in wandb the number of parameters (always write: numero_parametros" +* --cleaned => add this to use the cleaned data from Connecting points repository ---------------------------- +# in windows +``` +bash runModel.sh +``` +# In Linux +``` +sh runModel.sh +``` diff --git a/readme.md b/readme.md index e2a83cf..fb90448 100644 --- a/readme.md +++ b/readme.md @@ -1,3 +1,12 @@ +# About our paper "Impact of Pose Estimation Models for landmark-based Sign Language Recognition" +We use SL-GCN adapted module to make it run with our dataset obtained from [ConnectingPoints Repository]() +For our experiments we only train using Joints + +More detail about our process to run the model can be found in the READNE file in "SL-GCN" folder. + +The following sections of this readme are the explanation the author of this forked repository wrote. +----------------- + Skeleton Aware Multi-modal Sign Language Recognition ========= By [Songyao Jiang](https://www.songyaojiang.com/), [Bin Sun](https://github.com/Sun1992/), [Lichen Wang](https://sites.google.com/site/lichenwang123/), [Yue Bai](https://yueb17.github.io/), [Kunpeng Li](https://kunpengli1994.github.io/) and [Yun Fu](http://www1.ece.neu.edu/~yunfu/). 
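For reference, the flag list documented in the readme above can be combined into a single direct call to main.py. The sketch below mirrors the invocations used in runModel.sh; the experiment name, learning rate, and other values are illustrative placeholders rather than settings prescribed by the repository.

```
# Example run (placeholder values): wholepose keypoints, AEC dataset, 71 points
python main.py \
    --seed 5 \
    --experiment_name "results/71/AEC/wholepose-AEC-s-5" \
    --database AEC \
    --keypoints_model wholepose \
    --base_lr 0.05 \
    --keypoints_number 71 \
    --num_epoch 400 \
    --mode_train numero_parametros \
    --cleaned True   # optional: train on the cleaned ConnectingPoints split
```

Because --cleaned is declared with type=bool it expects an explicit value, and as with any argparse type=bool option any non-empty string (even "False") is parsed as True, so omit the flag entirely to train on the complete split.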
From b1af751041978750162aa9bdbb9bb1021e8c4d3b Mon Sep 17 00:00:00 2001 From: Chameleon Cloud User Date: Sun, 30 Oct 2022 08:42:35 +0000 Subject: [PATCH 38/56] readme mod --- SL-GCN/readme.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/SL-GCN/readme.md b/SL-GCN/readme.md index 990b695..c79c87b 100644 --- a/SL-GCN/readme.md +++ b/SL-GCN/readme.md @@ -16,6 +16,12 @@ Note: If you do not use WandB please change to False "wandbFlag" variable value * --mode_train => to show in wandb the number of parameters (always write: numero_parametros" * --cleaned => add this to use the cleaned data from Connecting points repository + +--------------- + +To automatize our work we create this .sh file +modify it as you need + # in windows ``` bash runModel.sh From aad01c8c3b0d66991866a159dee9caccad3a6faf Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Sun, 30 Oct 2022 10:00:48 -0500 Subject: [PATCH 39/56] reduce params v1 --- SL-GCN/config/sign/train/train_joint.yaml | 6 ++-- SL-GCN/main.py | 34 +++++++++++++++-------- SL-GCN/requirements.txt | 3 +- SL-GCN/runModelTest.sh | 21 ++++++++------ 4 files changed, 41 insertions(+), 23 deletions(-) diff --git a/SL-GCN/config/sign/train/train_joint.yaml b/SL-GCN/config/sign/train/train_joint.yaml index 5b336df..7a2617e 100644 --- a/SL-GCN/config/sign/train/train_joint.yaml +++ b/SL-GCN/config/sign/train/train_joint.yaml @@ -40,11 +40,11 @@ step: [] #[50, 100, 150, 200] # To modify the learning rate => lr * 0.1**Sum(x : # training #device: [0, 1,2,3 ] - -device: [0,1,2,3] +device: [0] keep_rate: 0.9 only_train_epoch: 1 -batch_size: 64 +#batch_size: 64 +batch_size: 8 test_batch_size: 8 num_epoch: 250 nesterov: True diff --git a/SL-GCN/main.py b/SL-GCN/main.py index 122688d..3c804fe 100644 --- a/SL-GCN/main.py +++ b/SL-GCN/main.py @@ -138,6 +138,7 @@ def get_parser(): parser.add_argument("--database", type=str, default="", help="Path to the testing dataset CSV file") parser.add_argument("--mode_train", type=str, default="train", help="Path to the testing dataset CSV file") parser.add_argument('--cleaned', type=bool, default=False, help='use nesterov or not') + parser.add_argument('--user', type=str, default="cristian", help='user of the experiment') return parser def count_parameters(model): @@ -1052,22 +1053,30 @@ def import_class(name): } import wandb import os + from dotenv import load_dotenv + load_dotenv() - os.environ["WANDB_API_KEY"] = "9c7a2412b1f242359f1a4915b620f578b32e96ac" + os.environ["WANDB_API_KEY"] = os.getenv('API_KEY_WAND') + print("WANDB API KEY :",os.environ["WANDB_API_KEY"][:5]) if wandbFlag: - if arg.cleaned: - wandb.init(project="three-datasets-psl", + if arg.user =='cristian': + wandb.init(project="sign_language_project", + entity="ml_projects", + config=config) + else: + if arg.cleaned: + wandb.init(project="three-datasets-psl", + entity="JoeNatan30", + tags=["cleaned_data","dist_dur_ban","model_mod"], + reinit=True, + config=config) + else: + wandb.init(project="three-datasets-psl", entity="JoeNatan30", - tags=["cleaned_data","dist_dur_ban","model_mod"], + tags=["complete_data","dist_dur_ban","model_mod"], reinit=True, config=config) - else: - wandb.init(project="three-datasets-psl", - entity="JoeNatan30", - tags=["complete_data","dist_dur_ban","model_mod"], - reinit=True, - config=config) config = wandb.config print('+'*10) @@ -1082,7 +1091,10 @@ def import_class(name): arg.kp_model = config["kp-model"] arg.database = config["database"] - arg.model_saved_directory = "save_models/"+arg.experiment_name+"/"+now+"/" + if 
arg.user =='cristian': + arg.model_saved_directory = "save_models/"+arg.experiment_name+"/" + else: + arg.model_saved_directory = "save_models/"+arg.experiment_name+"/"+now+"/" arg.work_dir = "work_dir/"+arg.experiment_name +"/" print('*'*20) diff --git a/SL-GCN/requirements.txt b/SL-GCN/requirements.txt index 7d1bbb8..6968f3b 100644 --- a/SL-GCN/requirements.txt +++ b/SL-GCN/requirements.txt @@ -8,4 +8,5 @@ torchvision --extra-index-url https://download.pytorch.org/whl/cu113 h5py seaborn torchmetrics -wandb==0.13.2 \ No newline at end of file +wandb==0.13.2 +python-dotenv==0.21.0 diff --git a/SL-GCN/runModelTest.sh b/SL-GCN/runModelTest.sh index c9df403..d3260d9 100644 --- a/SL-GCN/runModelTest.sh +++ b/SL-GCN/runModelTest.sh @@ -5,19 +5,24 @@ #num_class: 28 # AEC=28, PUCP=29 , WLASL=86 #num_point: 29 # 29 or 71 or 51 -declare -a points=(51 51 51) -declare -a lrs=(0.05 0.05 0.05) -declare -a datasets=("PUCP" "AEC" "WLASL") +#declare -a points=(51 51 51) +#declare -a lrs=(0.05 0.05 0.05) +#declare -a datasets=("PUCP" "AEC" "WLASL") +declare -a points=(29 71 29 71 29 71) +declare -a lrs=(0.05 0.05 0.05 0.05 0.05 0.05) +declare -a datasets=("AEC" "AEC" "PUCP" "PUCP" "WLASL" "WLASL") -for i in 0 5 15 25 35 +#for i in 0 5 15 25 35 +# for j in 0 1 2 +for i in 0 do - for j in 0 1 2 + for j in 0 1 2 3 4 5 do - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train neurips_51points_v4 - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train neurips_51points_v4 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 400 --mode_train neurips_51points_v4 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v5_reduce + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v5_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v5_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" done done From 3dfd56eaec8fe6febb97a56142577da17b90ca4a 
Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Sun, 30 Oct 2022 12:57:13 -0500 Subject: [PATCH 40/56] exp v7 --- SL-GCN/model/decouple_gcn_attn.py | 8 +++----- SL-GCN/runModelTest.sh | 14 +++++++------- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/SL-GCN/model/decouple_gcn_attn.py b/SL-GCN/model/decouple_gcn_attn.py index 800552c..247c44a 100644 --- a/SL-GCN/model/decouple_gcn_attn.py +++ b/SL-GCN/model/decouple_gcn_attn.py @@ -1,7 +1,5 @@ import torch import torch.nn as nn -import torch.nn.functional as F -from torch.autograd import Variable import numpy as np import math from model.dropSke import DropBlock_Ske @@ -240,14 +238,14 @@ def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_siz #self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) #self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) #self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) - self.l5 = TCN_GCN_unit(64, 128, A, groups, num_point, block_size, stride=2) + self.l5 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size, stride=2) #self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) #self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) - self.l8 = TCN_GCN_unit(128, 256, A, groups,num_point, block_size, stride=2) + self.l8 = TCN_GCN_unit(64, 64, A, groups,num_point, block_size, stride=2) #self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) #self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) - self.fc = nn.Linear(256, num_class) + self.fc = nn.Linear(64, num_class) nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) bn_init(self.data_bn, 1) diff --git a/SL-GCN/runModelTest.sh b/SL-GCN/runModelTest.sh index d3260d9..6f4235c 100644 --- a/SL-GCN/runModelTest.sh +++ b/SL-GCN/runModelTest.sh @@ -9,20 +9,20 @@ #declare -a lrs=(0.05 0.05 0.05) #declare -a datasets=("PUCP" "AEC" "WLASL") -declare -a points=(29 71 29 71 29 71) -declare -a lrs=(0.05 0.05 0.05 0.05 0.05 0.05) -declare -a datasets=("AEC" "AEC" "PUCP" "PUCP" "WLASL" "WLASL") +declare -a points=(29 51 71 29 51 71 29 51 71) +declare -a lrs=(0.05 0.05 0.05 0.05 0.05 0.05 0.05 0.05 0.05) +declare -a datasets=("AEC" "AEC" "AEC" "PUCP" "PUCP" "PUCP" "WLASL" "WLASL" "WLASL") #for i in 0 5 15 25 35 # for j in 0 1 2 for i in 0 do - for j in 0 1 2 3 4 5 + for j in 0 1 2 3 4 5 6 7 8 do - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v5_reduce - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v5_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v5_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name 
"results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v7_reduce + #python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v6_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + #python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v6_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" done done From 3e320242dcc622e04444fe9c3e7cee9360cc6b2b Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Sun, 30 Oct 2022 16:46:22 -0500 Subject: [PATCH 41/56] experimento 9 --- SL-GCN/model/decouple_gcn_attn.py | 8 ++++---- SL-GCN/runModelTest.sh | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/SL-GCN/model/decouple_gcn_attn.py b/SL-GCN/model/decouple_gcn_attn.py index 247c44a..7341e84 100644 --- a/SL-GCN/model/decouple_gcn_attn.py +++ b/SL-GCN/model/decouple_gcn_attn.py @@ -233,15 +233,15 @@ def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_siz A = self.graph.A self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) - self.l1 = TCN_GCN_unit(in_channels, 64, A, groups, num_point, + self.l1 = TCN_GCN_unit(in_channels, 32, A, groups, num_point, block_size, residual=False) #self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) #self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) #self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) - self.l5 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size, stride=2) + #self.l5 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size, stride=2) #self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) #self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) - self.l8 = TCN_GCN_unit(64, 64, A, groups,num_point, block_size, stride=2) + self.l8 = TCN_GCN_unit(32, 64, A, groups,num_point, block_size, stride=2) #self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) #self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) @@ -260,7 +260,7 @@ def forward(self, x, keep_prob=0.9): #x = self.l2(x, 1.0) #x = self.l3(x, 1.0) #x = self.l4(x, 1.0) - x = self.l5(x, 1.0) + #x = self.l5(x, 1.0) #x = self.l6(x, 1.0) #x = self.l7(x, keep_prob) x = self.l8(x, keep_prob) diff --git a/SL-GCN/runModelTest.sh b/SL-GCN/runModelTest.sh index 6f4235c..9e8b387 100644 --- a/SL-GCN/runModelTest.sh +++ b/SL-GCN/runModelTest.sh @@ -20,7 +20,7 @@ for i in 0 do for j in 0 1 2 3 4 5 6 7 8 do - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v7_reduce + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} 
--keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v10_reduce #python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v6_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" #python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v6_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" done From 85fe6c7c2cd3dc7424586fcc74e592ec5f7a784f Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Sun, 30 Oct 2022 16:55:19 -0500 Subject: [PATCH 42/56] experimento 10 --- SL-GCN/model/decouple_gcn_attn.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/SL-GCN/model/decouple_gcn_attn.py b/SL-GCN/model/decouple_gcn_attn.py index 7341e84..f79b64d 100644 --- a/SL-GCN/model/decouple_gcn_attn.py +++ b/SL-GCN/model/decouple_gcn_attn.py @@ -241,11 +241,11 @@ def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_siz #self.l5 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size, stride=2) #self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) #self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) - self.l8 = TCN_GCN_unit(32, 64, A, groups,num_point, block_size, stride=2) + self.l8 = TCN_GCN_unit(32, 32, A, groups,num_point, block_size, stride=2) #self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) #self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) - self.fc = nn.Linear(64, num_class) + self.fc = nn.Linear(32, num_class) nn.init.normal(self.fc.weight, 0, math.sqrt(2. 
/ num_class)) bn_init(self.data_bn, 1) From 1590e397a1c051dee1ab51789a665ea742336939 Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Mon, 31 Oct 2022 16:47:10 -0500 Subject: [PATCH 43/56] analysis optimizacion model aec 29 v1 --- .gitignore | 1 + SL-GCN/main.py | 28 ++++++++++++++++++-- SL-GCN/runModelTest_optimizacion_analysis.sh | 26 ++++++++++++++++++ SL-GCN/wandbFunctions.py | 5 ++-- 4 files changed, 56 insertions(+), 4 deletions(-) create mode 100644 SL-GCN/runModelTest_optimizacion_analysis.sh diff --git a/.gitignore b/.gitignore index 9032ca2..4e8fc2a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +*.env data/ save_models/ work_dir/ diff --git a/SL-GCN/main.py b/SL-GCN/main.py index 3c804fe..c689205 100644 --- a/SL-GCN/main.py +++ b/SL-GCN/main.py @@ -279,6 +279,30 @@ def load_model(self): self.m_params = sum(p.numel() for p in self.model.parameters()) self.trainable_m_params= sum(p.numel() for p in self.model.parameters() if p.requires_grad) + #frames*kp-model*numero_de_videos + total_informacion_analizada={ + "29":{ + "AEC":1812500, + "PUCP":2456300, + "WLASL":3932400 + }, + "51":{ + "AEC":3187500, + "PUCP":4319700, + "WLASL":6915600 + } + , + "71":{ + "AEC":4437500, + "PUCP":6013700, + "WLASL":9627600 + } + } + + + self.factor_trainable_m_params = self.trainable_m_params/total_informacion_analizada[str(self.arg.keypoints_number)][str(self.arg.database)] + + # self.loss = LabelSmoothingCrossEntropy().cuda(output_device) @@ -515,7 +539,7 @@ def train_zero(self, epoch, save_model=False): mean_loss = np.mean(loss_value) if mean_loss>10: mean_loss = 10 - wandbF.wandbTrainLog(mean_loss, accuracy,self.m_params,self.trainable_m_params) + wandbF.wandbTrainLog(mean_loss, accuracy,self.m_params,self.trainable_m_params,self.factor_trainable_m_params) # statistics of time consumption and loss proportion = { k: '{:02d}%'.format(int(round(v * 100 / sum(timer.values())))) @@ -627,7 +651,7 @@ def train(self, epoch, save_model=False): if mean_loss>10: mean_loss = 10 - wandbF.wandbTrainLog(mean_loss, accuracy,self.m_params,self.trainable_m_params) + wandbF.wandbTrainLog(mean_loss, accuracy,self.m_params,self.trainable_m_params,self.factor_trainable_m_params) # statistics of time consumption and loss proportion = { k: '{:02d}%'.format(int(round(v * 100 / sum(timer.values())))) diff --git a/SL-GCN/runModelTest_optimizacion_analysis.sh b/SL-GCN/runModelTest_optimizacion_analysis.sh new file mode 100644 index 0000000..d3af519 --- /dev/null +++ b/SL-GCN/runModelTest_optimizacion_analysis.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +######################################################### +#python main.py --config config/sign/train/train_joint.yaml + #num_class: 28 # AEC=28, PUCP=29 , WLASL=86 + #num_point: 29 # 29 or 71 or 51 + +#declare -a points=(51 51 51) +#declare -a lrs=(0.05 0.05 0.05) +#declare -a datasets=("PUCP" "AEC" "WLASL") + +declare -a points=(29 51 71 29 51 71 29 51 71) +declare -a lrs=(0.05 0.05 0.05 0.05 0.05 0.05 0.05 0.05 0.05) +declare -a datasets=("AEC" "AEC" "AEC" "PUCP" "PUCP" "PUCP" "WLASL" "WLASL" "WLASL") + + +for i in 0 +do + for j in 0 + do + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 2 --mode_train optimizacion_analysis_aec_29_v1 + #python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} 
--keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v6_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + #python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v6_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + done +done + diff --git a/SL-GCN/wandbFunctions.py b/SL-GCN/wandbFunctions.py index c38c246..b83770a 100644 --- a/SL-GCN/wandbFunctions.py +++ b/SL-GCN/wandbFunctions.py @@ -28,11 +28,12 @@ def initConfigWandb(num_layers, num_classes, batch_size, config["epsilon"] = epsilon ''' -def wandbTrainLog(trainLoss, TrainAcc,p1,p2): +def wandbTrainLog(trainLoss, TrainAcc,p1,p2,factor_params): wandb.log({"Train loss": trainLoss, "Train accuracy": TrainAcc, "m_params":p1, - "trainable_m_params":p2 + "trainable_m_params":p2, + "ratio_params_model":factor_params }, commit=False) def wandbValLog(testLoss, TestAcc, top5,maxTestAcc,relative_maxtop5): From e539a368e91c1fb5a94b3bb52aa267e24740affc Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Mon, 31 Oct 2022 22:59:14 +0000 Subject: [PATCH 44/56] v2 2 capas 16 a 32 --- SL-GCN/config/sign/train/train_joint.yaml | 8 ++++---- SL-GCN/model/decouple_gcn_attn.py | 4 ++-- SL-GCN/runModelTest_optimizacion_analysis.sh | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/SL-GCN/config/sign/train/train_joint.yaml b/SL-GCN/config/sign/train/train_joint.yaml index 7a2617e..afc7942 100644 --- a/SL-GCN/config/sign/train/train_joint.yaml +++ b/SL-GCN/config/sign/train/train_joint.yaml @@ -39,12 +39,12 @@ base_lr: 0.005 step: [] #[50, 100, 150, 200] # To modify the learning rate => lr * 0.1**Sum(x :-> epoch > step) # training -#device: [0, 1,2,3 ] -device: [0] +device: [0, 1,2,3 ] +#device: [0] keep_rate: 0.9 only_train_epoch: 1 -#batch_size: 64 -batch_size: 8 +batch_size: 64 +#batch_size: 8 test_batch_size: 8 num_epoch: 250 nesterov: True diff --git a/SL-GCN/model/decouple_gcn_attn.py b/SL-GCN/model/decouple_gcn_attn.py index f79b64d..4779fab 100644 --- a/SL-GCN/model/decouple_gcn_attn.py +++ b/SL-GCN/model/decouple_gcn_attn.py @@ -233,7 +233,7 @@ def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_siz A = self.graph.A self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) - self.l1 = TCN_GCN_unit(in_channels, 32, A, groups, num_point, + self.l1 = TCN_GCN_unit(in_channels, 16, A, groups, num_point, block_size, residual=False) #self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) #self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) @@ -241,7 +241,7 @@ def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_siz #self.l5 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size, stride=2) #self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) #self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) - self.l8 = TCN_GCN_unit(32, 32, A, groups,num_point, block_size, stride=2) + self.l8 = TCN_GCN_unit(16, 32, A, groups,num_point, block_size, stride=2) #self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) #self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, 
block_size) diff --git a/SL-GCN/runModelTest_optimizacion_analysis.sh b/SL-GCN/runModelTest_optimizacion_analysis.sh index d3af519..a1655ae 100644 --- a/SL-GCN/runModelTest_optimizacion_analysis.sh +++ b/SL-GCN/runModelTest_optimizacion_analysis.sh @@ -18,7 +18,7 @@ for i in 0 do for j in 0 do - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 2 --mode_train optimizacion_analysis_aec_29_v1 + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 5000 --mode_train optimizacion_analysis_aec_29_v2 #python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v6_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" #python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v6_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" done From 9ac0c6f054fa22f4d5e4e809ca21565b6e07e309 Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Mon, 31 Oct 2022 23:39:18 +0000 Subject: [PATCH 45/56] v2 1 capa a 32 --- SL-GCN/model/decouple_gcn_attn.py | 6 +++--- SL-GCN/runModelTest_optimizacion_analysis.sh | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/SL-GCN/model/decouple_gcn_attn.py b/SL-GCN/model/decouple_gcn_attn.py index 4779fab..fb7d13b 100644 --- a/SL-GCN/model/decouple_gcn_attn.py +++ b/SL-GCN/model/decouple_gcn_attn.py @@ -233,7 +233,7 @@ def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_siz A = self.graph.A self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) - self.l1 = TCN_GCN_unit(in_channels, 16, A, groups, num_point, + self.l1 = TCN_GCN_unit(in_channels, 32, A, groups, num_point, block_size, residual=False) #self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) #self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) @@ -241,7 +241,7 @@ def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_siz #self.l5 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size, stride=2) #self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) #self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) - self.l8 = TCN_GCN_unit(16, 32, A, groups,num_point, block_size, stride=2) + #self.l8 = TCN_GCN_unit(16, 32, A, groups,num_point, block_size, stride=2) #self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) #self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) @@ -263,7 +263,7 @@ def forward(self, x, keep_prob=0.9): #x = self.l5(x, 1.0) #x = self.l6(x, 1.0) #x = self.l7(x, keep_prob) - x = self.l8(x, keep_prob) + #x = self.l8(x, keep_prob) #x = self.l9(x, keep_prob) #x = self.l10(x, keep_prob) diff --git 
a/SL-GCN/runModelTest_optimizacion_analysis.sh b/SL-GCN/runModelTest_optimizacion_analysis.sh index a1655ae..78a8621 100644 --- a/SL-GCN/runModelTest_optimizacion_analysis.sh +++ b/SL-GCN/runModelTest_optimizacion_analysis.sh @@ -18,7 +18,7 @@ for i in 0 do for j in 0 do - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 5000 --mode_train optimizacion_analysis_aec_29_v2 + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 5000 --mode_train optimizacion_analysis_aec_29_v3 #python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v6_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" #python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v6_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" done From d159271ade865331de1980e71341ae1f0b94b632 Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Tue, 1 Nov 2022 01:03:41 +0000 Subject: [PATCH 46/56] v2 1 capa a 16 --- SL-GCN/model/decouple_gcn_attn.py | 4 ++-- SL-GCN/runModelTest_optimizacion_analysis.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/SL-GCN/model/decouple_gcn_attn.py b/SL-GCN/model/decouple_gcn_attn.py index fb7d13b..1318793 100644 --- a/SL-GCN/model/decouple_gcn_attn.py +++ b/SL-GCN/model/decouple_gcn_attn.py @@ -233,7 +233,7 @@ def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_siz A = self.graph.A self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) - self.l1 = TCN_GCN_unit(in_channels, 32, A, groups, num_point, + self.l1 = TCN_GCN_unit(in_channels, 16, A, groups, num_point, block_size, residual=False) #self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) #self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) @@ -245,7 +245,7 @@ def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_siz #self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) #self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) - self.fc = nn.Linear(32, num_class) + self.fc = nn.Linear(16, num_class) nn.init.normal(self.fc.weight, 0, math.sqrt(2. 
/ num_class)) bn_init(self.data_bn, 1) diff --git a/SL-GCN/runModelTest_optimizacion_analysis.sh b/SL-GCN/runModelTest_optimizacion_analysis.sh index 78a8621..928c4ad 100644 --- a/SL-GCN/runModelTest_optimizacion_analysis.sh +++ b/SL-GCN/runModelTest_optimizacion_analysis.sh @@ -18,7 +18,7 @@ for i in 0 do for j in 0 do - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 5000 --mode_train optimizacion_analysis_aec_29_v3 + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 5000 --mode_train optimizacion_analysis_aec_29_v4 #python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v6_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" #python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v6_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" done From c1415293268cfe1b4dfcd7672dd33d223370a33d Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Tue, 1 Nov 2022 02:47:54 +0000 Subject: [PATCH 47/56] corriendo analysis completo 1 capa a 16 para todos los datasets --- SL-GCN/model/decouple_gcn_attn.py | 13 ++++--- SL-GCN/model/models_by_dataset.py | 36 ++++++++++++++++++++ SL-GCN/runModelTest_optimizacion_analysis.sh | 8 ++--- 3 files changed, 49 insertions(+), 8 deletions(-) create mode 100644 SL-GCN/model/models_by_dataset.py diff --git a/SL-GCN/model/decouple_gcn_attn.py b/SL-GCN/model/decouple_gcn_attn.py index 1318793..3b6f643 100644 --- a/SL-GCN/model/decouple_gcn_attn.py +++ b/SL-GCN/model/decouple_gcn_attn.py @@ -4,7 +4,7 @@ import math from model.dropSke import DropBlock_Ske from model.dropT import DropBlockT_1d - +import time def import_class(name): components = name.split('.') @@ -231,10 +231,15 @@ def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_siz self.graph.num_node = num_point A = self.graph.A + + print("@"*30) + print("num_class : ",num_class) + print("num_point : ",num_point) + print("@"*30) + time.sleep(2) + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) - - self.l1 = TCN_GCN_unit(in_channels, 16, A, groups, num_point, - block_size, residual=False) + self.l1 = TCN_GCN_unit(in_channels, 16, A, groups, num_point,block_size, residual=False) #self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) #self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) #self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) diff --git a/SL-GCN/model/models_by_dataset.py b/SL-GCN/model/models_by_dataset.py new file mode 100644 index 0000000..ab36c5c --- /dev/null +++ b/SL-GCN/model/models_by_dataset.py @@ -0,0 +1,36 @@ +#AEC-29 v1 +class Model(nn.Module): + def __init__(self, 
num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=2): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + self.graph.num_node = num_point + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(in_channels, 32, A, groups, num_point,block_size, residual=False) + self.l8 = TCN_GCN_unit(32, 32, A, groups,num_point, block_size, stride=2) + self.fc = nn.Linear(32, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x, keep_prob=0.9): + N, C, T, V, M = x.size() + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute( + 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x, 1.0) + x = self.l8(x, keep_prob) + # N*M,C,T,V + c_new = x.size(1) + x = x.reshape(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) \ No newline at end of file diff --git a/SL-GCN/runModelTest_optimizacion_analysis.sh b/SL-GCN/runModelTest_optimizacion_analysis.sh index 928c4ad..6253860 100644 --- a/SL-GCN/runModelTest_optimizacion_analysis.sh +++ b/SL-GCN/runModelTest_optimizacion_analysis.sh @@ -16,11 +16,11 @@ declare -a datasets=("AEC" "AEC" "AEC" "PUCP" "PUCP" "PUCP" "WLASL" "WLASL" "WLA for i in 0 do - for j in 0 + for j in 1 2 3 4 5 6 7 8 do - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 5000 --mode_train optimizacion_analysis_aec_29_v4 - #python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v6_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - #python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v6_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 600 --mode_train optimizacion_analysis_aec_29_v4 + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 600 --mode_train neurips_51points_v6_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 600 
--mode_train neurips_51points_v6_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" done done From 41c7a2caaf84c54b413276b36783c67b29065799 Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Tue, 1 Nov 2022 23:16:30 +0000 Subject: [PATCH 48/56] optimizacion_analysis_v3 PUCP 29 --- SL-GCN/model/decouple_gcn_attn.py | 39 ++------------------ SL-GCN/model/models_by_dataset.py | 37 ++++++++++++++++++- SL-GCN/runModelTest_optimizacion_analysis.sh | 23 ++++++++++-- 3 files changed, 59 insertions(+), 40 deletions(-) diff --git a/SL-GCN/model/decouple_gcn_attn.py b/SL-GCN/model/decouple_gcn_attn.py index 3b6f643..aa0304c 100644 --- a/SL-GCN/model/decouple_gcn_attn.py +++ b/SL-GCN/model/decouple_gcn_attn.py @@ -231,26 +231,11 @@ def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_siz self.graph.num_node = num_point A = self.graph.A - - print("@"*30) - print("num_class : ",num_class) - print("num_point : ",num_point) - print("@"*30) - time.sleep(2) - self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) - self.l1 = TCN_GCN_unit(in_channels, 16, A, groups, num_point,block_size, residual=False) - #self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) - #self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) - #self.l4 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) - #self.l5 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size, stride=2) - #self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) - #self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) - #self.l8 = TCN_GCN_unit(16, 32, A, groups,num_point, block_size, stride=2) - #self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) - #self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) - - self.fc = nn.Linear(16, num_class) + + self.l1 = TCN_GCN_unit(in_channels, 32, A, groups, num_point, + block_size, residual=False) + self.fc = nn.Linear(32, num_class) nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) bn_init(self.data_bn, 1) @@ -262,23 +247,7 @@ def forward(self, x, keep_prob=0.9): 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) x = self.l1(x, 1.0) - #x = self.l2(x, 1.0) - #x = self.l3(x, 1.0) - #x = self.l4(x, 1.0) - #x = self.l5(x, 1.0) - #x = self.l6(x, 1.0) - #x = self.l7(x, keep_prob) - #x = self.l8(x, keep_prob) - #x = self.l9(x, keep_prob) - #x = self.l10(x, keep_prob) - - # N*M,C,T,V c_new = x.size(1) - - # print(x.size()) - # print(N, M, c_new) - - # x = x.view(N, M, c_new, -1) x = x.reshape(N, M, c_new, -1) x = x.mean(3).mean(1) diff --git a/SL-GCN/model/models_by_dataset.py b/SL-GCN/model/models_by_dataset.py index ab36c5c..1e813be 100644 --- a/SL-GCN/model/models_by_dataset.py +++ b/SL-GCN/model/models_by_dataset.py @@ -1,5 +1,40 @@ #AEC-29 v1 -class Model(nn.Module): + +class Model_v3(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=2): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + self.graph.num_node = num_point + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(in_channels, 32, A, groups, num_point, + block_size, residual=False) + self.fc = nn.Linear(32, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. 
/ num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x, keep_prob=0.9): + N, C, T, V, M = x.size() + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute( + 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x, 1.0) + c_new = x.size(1) + x = x.reshape(N, M, c_new, -1) + x = x.mean(3).mean(1) + + return self.fc(x) + +class Model_v4(nn.Module): def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=2): super(Model, self).__init__() diff --git a/SL-GCN/runModelTest_optimizacion_analysis.sh b/SL-GCN/runModelTest_optimizacion_analysis.sh index 6253860..574d985 100644 --- a/SL-GCN/runModelTest_optimizacion_analysis.sh +++ b/SL-GCN/runModelTest_optimizacion_analysis.sh @@ -13,14 +13,29 @@ declare -a points=(29 51 71 29 51 71 29 51 71) declare -a lrs=(0.05 0.05 0.05 0.05 0.05 0.05 0.05 0.05 0.05) declare -a datasets=("AEC" "AEC" "AEC" "PUCP" "PUCP" "PUCP" "WLASL" "WLASL" "WLASL") - +''' for i in 0 do for j in 1 2 3 4 5 6 7 8 do - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 600 --mode_train optimizacion_analysis_aec_29_v4 - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 600 --mode_train neurips_51points_v6_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 600 --mode_train neurips_51points_v6_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 600 --mode_train optimizacion_analysis_aec_29_v5 + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 600 --mode_train optimizacion_analysis_aec_29_v5 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 600 --mode_train optimizacion_analysis_aec_29_v5 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + done +done +''' + +for i in 0 +do + for j in 3 + do + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr 
${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1000 --mode_train optimizacion_analysis_v3 + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1000 --mode_train optimizacion_analysis_v3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1000 --mode_train optimizacion_analysis_v3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" done done +# optimizacion_analysis_aec_29_v4 model v4 +# optimizacion_analysis_aec_29_v5 model v4 +# optimizacion_analysis_v3 model v3 + From b1ce55485a58969c024344ef0fd23eda436427f1 Mon Sep 17 00:00:00 2001 From: Chameleon Cloud User Date: Sun, 6 Nov 2022 14:31:52 +0000 Subject: [PATCH 49/56] analisis v4 modelo mas ligero --- SL-GCN/config/sign/train/train_joint.yaml | 2 +- SL-GCN/main.py | 6 +++- SL-GCN/model/decouple_gcn_attn.py | 9 ++--- SL-GCN/model/models_by_dataset.py | 37 +++++++++++++++++++- SL-GCN/requirements.txt | 2 +- SL-GCN/runModelTest_optimizacion_analysis.sh | 9 ++--- 6 files changed, 53 insertions(+), 12 deletions(-) diff --git a/SL-GCN/config/sign/train/train_joint.yaml b/SL-GCN/config/sign/train/train_joint.yaml index afc7942..97a64c7 100644 --- a/SL-GCN/config/sign/train/train_joint.yaml +++ b/SL-GCN/config/sign/train/train_joint.yaml @@ -39,7 +39,7 @@ base_lr: 0.005 step: [] #[50, 100, 150, 200] # To modify the learning rate => lr * 0.1**Sum(x :-> epoch > step) # training -device: [0, 1,2,3 ] +device: [0, 1] #device: [0] keep_rate: 0.9 only_train_epoch: 1 diff --git a/SL-GCN/main.py b/SL-GCN/main.py index c689205..ad70cba 100644 --- a/SL-GCN/main.py +++ b/SL-GCN/main.py @@ -1141,7 +1141,11 @@ def import_class(name): model_name = runAndModelName print('model_name : ',model_name) if wandbFlag: - wandb.run.name = runAndModelName + "-" + now + + if arg.user =='cristian': + wandb.run.name = runAndModelName + else: + wandb.run.name = runAndModelName + "-" + now wandb.run.save() diff --git a/SL-GCN/model/decouple_gcn_attn.py b/SL-GCN/model/decouple_gcn_attn.py index aa0304c..8a54711 100644 --- a/SL-GCN/model/decouple_gcn_attn.py +++ b/SL-GCN/model/decouple_gcn_attn.py @@ -233,9 +233,8 @@ def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_siz A = self.graph.A self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) - self.l1 = TCN_GCN_unit(in_channels, 32, A, groups, num_point, - block_size, residual=False) - self.fc = nn.Linear(32, num_class) + self.l1 = TCN_GCN_unit(in_channels, 16, A, groups, num_point,block_size, residual=False) + self.fc = nn.Linear(16, num_class) nn.init.normal(self.fc.weight, 0, math.sqrt(2. 
/ num_class)) bn_init(self.data_bn, 1) @@ -247,8 +246,10 @@ def forward(self, x, keep_prob=0.9): 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) x = self.l1(x, 1.0) + + # N*M,C,T,V c_new = x.size(1) x = x.reshape(N, M, c_new, -1) x = x.mean(3).mean(1) - return self.fc(x) + return self.fc(x) \ No newline at end of file diff --git a/SL-GCN/model/models_by_dataset.py b/SL-GCN/model/models_by_dataset.py index 1e813be..93054ba 100644 --- a/SL-GCN/model/models_by_dataset.py +++ b/SL-GCN/model/models_by_dataset.py @@ -34,7 +34,7 @@ def forward(self, x, keep_prob=0.9): return self.fc(x) -class Model_v4(nn.Module): +class Model_v2(nn.Module): def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=2): super(Model, self).__init__() @@ -68,4 +68,39 @@ def forward(self, x, keep_prob=0.9): x = x.reshape(N, M, c_new, -1) x = x.mean(3).mean(1) + return self.fc(x) + +class Model_v4(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=2): + super(Model, self).__init__() + + if graph is None: + raise ValueError() + else: + Graph = import_class(graph) + self.graph = Graph(**graph_args) + self.graph.num_node = num_point + + A = self.graph.A + self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) + + self.l1 = TCN_GCN_unit(in_channels, 16, A, groups, num_point,block_size, residual=False) + self.fc = nn.Linear(16, num_class) + nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) + bn_init(self.data_bn, 1) + + def forward(self, x, keep_prob=0.9): + N, C, T, V, M = x.size() + x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) + x = self.data_bn(x) + x = x.view(N, M, V, C, T).permute( + 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) + + x = self.l1(x, 1.0) + + # N*M,C,T,V + c_new = x.size(1) + x = x.reshape(N, M, c_new, -1) + x = x.mean(3).mean(1) + return self.fc(x) \ No newline at end of file diff --git a/SL-GCN/requirements.txt b/SL-GCN/requirements.txt index 6968f3b..04814cd 100644 --- a/SL-GCN/requirements.txt +++ b/SL-GCN/requirements.txt @@ -9,4 +9,4 @@ h5py seaborn torchmetrics wandb==0.13.2 -python-dotenv==0.21.0 +python-dotenv \ No newline at end of file diff --git a/SL-GCN/runModelTest_optimizacion_analysis.sh b/SL-GCN/runModelTest_optimizacion_analysis.sh index 574d985..f64854a 100644 --- a/SL-GCN/runModelTest_optimizacion_analysis.sh +++ b/SL-GCN/runModelTest_optimizacion_analysis.sh @@ -24,14 +24,15 @@ do done done ''' +# for j in 4 5 6 7 8 for i in 0 do - for j in 3 + for j in 6 7 8 5 4 3 2 1 0 do - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1000 --mode_train optimizacion_analysis_v3 - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1000 --mode_train optimizacion_analysis_v3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1000 
--mode_train optimizacion_analysis_v3 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1200 --mode_train optimizacion_analysis_v4 + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1200 --mode_train optimizacion_analysis_v4 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1200 --mode_train optimizacion_analysis_v4 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" done done From 89d32437b7a1681478e68713375625fe22a0c4b0 Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Sun, 6 Nov 2022 17:20:22 +0000 Subject: [PATCH 50/56] escoger el model_version esta automatizado --- SL-GCN/main.py | 12 +- SL-GCN/model/decouple_gcn_attn.py | 71 +++++++++++- SL-GCN/model/models_by_dataset.py | 112 ++++++------------- SL-GCN/runModelTest_optimizacion_analysis.sh | 21 +++- 4 files changed, 125 insertions(+), 91 deletions(-) diff --git a/SL-GCN/main.py b/SL-GCN/main.py index ad70cba..90e5018 100644 --- a/SL-GCN/main.py +++ b/SL-GCN/main.py @@ -134,11 +134,14 @@ def get_parser(): parser.add_argument("--keypoints_model", type=str, default="openpose", help="Path to the training dataset CSV file") parser.add_argument("--keypoints_number", type=int, default=29, help="Path to the training dataset CSV file") parser.add_argument("--testing_set_path", type=str, default="", help="Path to the testing dataset CSV file") - parser.add_argument("--num_class", type=int, default=0, help="Path to the testing dataset CSV file") - parser.add_argument("--database", type=str, default="", help="Path to the testing dataset CSV file") - parser.add_argument("--mode_train", type=str, default="train", help="Path to the testing dataset CSV file") + parser.add_argument("--num_class", type=int, default=0, help="number of points") + parser.add_argument("--database", type=str, default="", help="name of database") + parser.add_argument("--mode_train", type=str, default="train", help="training special characteristic to name") parser.add_argument('--cleaned', type=bool, default=False, help='use nesterov or not') parser.add_argument('--user', type=str, default="cristian", help='user of the experiment') + parser.add_argument('--model_version', type=int, default=1, help='model version of architecture') + + return parser def count_parameters(model): @@ -1045,7 +1048,8 @@ def import_class(name): arg.model_args['num_class'] =arg.num_class arg.model_args['num_point'] =arg.keypoints_number - + arg.model_args['model_version'] = arg.model_version + arg.model_args['graph_args']['num_node'] =arg.keypoints_number #num_class: 28 # AEC=28, PUCP=36 , WLASL=101 diff --git a/SL-GCN/model/decouple_gcn_attn.py b/SL-GCN/model/decouple_gcn_attn.py index 8a54711..99fc829 
100644 --- a/SL-GCN/model/decouple_gcn_attn.py +++ b/SL-GCN/model/decouple_gcn_attn.py @@ -219,10 +219,13 @@ def forward(self, x, keep_prob): return self.relu(y + x_skip) + class Model(nn.Module): - def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=2): + def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=2,model_version=1): super(Model, self).__init__() - + self.model_version = model_version + print("model_version : ",self.model_version) + time.sleep(1) if graph is None: raise ValueError() else: @@ -233,8 +236,43 @@ def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_siz A = self.graph.A self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) - self.l1 = TCN_GCN_unit(in_channels, 16, A, groups, num_point,block_size, residual=False) - self.fc = nn.Linear(16, num_class) + + if self.model_version == 0: + self.l1 = TCN_GCN_unit(in_channels, 32, A, groups, num_point,block_size, residual=False) + self.l2 = TCN_GCN_unit(32, 64, A, groups,num_point, block_size, stride=2) + self.l3 = TCN_GCN_unit(64, 128, A, groups,num_point, block_size, stride=2) + self.fc = nn.Linear(128, num_class) + + if self.model_version == 1: + self.l1 = TCN_GCN_unit(in_channels, 32, A, groups, num_point,block_size, residual=False) + self.l2 = TCN_GCN_unit(32, 16, A, groups,num_point, block_size, stride=2) + self.l3 = TCN_GCN_unit(16, 128, A, groups,num_point, block_size, stride=2) + self.fc = nn.Linear(128, num_class) + + if self.model_version == 2: + self.l1 = TCN_GCN_unit(in_channels, 32, A, groups, num_point,block_size, residual=False) + self.l2 = TCN_GCN_unit(32, 128, A, groups,num_point, block_size, stride=2) + self.fc = nn.Linear(128, num_class) + + if self.model_version == 3: + self.l1 = TCN_GCN_unit(in_channels, 32, A, groups, num_point,block_size, residual=False) + self.l2 = TCN_GCN_unit(32, 64, A, groups,num_point, block_size, stride=2) + self.fc = nn.Linear(64, num_class) + + if self.model_version == 4: + self.l1 = TCN_GCN_unit(in_channels, 16, A, groups, num_point,block_size, residual=False) + self.l2 = TCN_GCN_unit(16, 32, A, groups,num_point, block_size, stride=2) + self.fc = nn.Linear(32, num_class) + + if self.model_version == 5: + self.l1 = TCN_GCN_unit(in_channels, 32, A, groups, num_point,block_size, residual=False) + self.fc = nn.Linear(32, num_class) + + if self.model_version == 6: # experimento anterior lo llamabamos model v4 + self.l1 = TCN_GCN_unit(in_channels, 16, A, groups, num_point,block_size, residual=False) + self.fc = nn.Linear(16, num_class) + + nn.init.normal(self.fc.weight, 0, math.sqrt(2. 
/ num_class)) bn_init(self.data_bn, 1) @@ -245,11 +283,32 @@ def forward(self, x, keep_prob=0.9): x = x.view(N, M, V, C, T).permute( 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) - x = self.l1(x, 1.0) + if self.model_version == 0: + x = self.l1(x, 1.0) + x = self.l2(x, 1.0) + x = self.l3(x, keep_prob) + if self.model_version == 1: + x = self.l1(x, 1.0) + x = self.l2(x, 1.0) + x = self.l3(x, keep_prob) + if self.model_version == 2: + x = self.l1(x, 1.0) + x = self.l2(x, keep_prob) + if self.model_version == 3: + x = self.l1(x, 1.0) + x = self.l2(x, keep_prob) + if self.model_version == 4: + x = self.l1(x, 1.0) + x = self.l2(x, keep_prob) + if self.model_version == 5: + x = self.l1(x, 1.0) + if self.model_version == 6: + x = self.l1(x, 1.0) # N*M,C,T,V c_new = x.size(1) x = x.reshape(N, M, c_new, -1) x = x.mean(3).mean(1) - return self.fc(x) \ No newline at end of file + return self.fc(x) + \ No newline at end of file diff --git a/SL-GCN/model/models_by_dataset.py b/SL-GCN/model/models_by_dataset.py index 93054ba..87af7a9 100644 --- a/SL-GCN/model/models_by_dataset.py +++ b/SL-GCN/model/models_by_dataset.py @@ -1,79 +1,10 @@ #AEC-29 v1 -class Model_v3(nn.Module): - def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=2): - super(Model, self).__init__() - - if graph is None: - raise ValueError() - else: - Graph = import_class(graph) - self.graph = Graph(**graph_args) - self.graph.num_node = num_point - - A = self.graph.A - self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) - - self.l1 = TCN_GCN_unit(in_channels, 32, A, groups, num_point, - block_size, residual=False) - self.fc = nn.Linear(32, num_class) - nn.init.normal(self.fc.weight, 0, math.sqrt(2. / num_class)) - bn_init(self.data_bn, 1) - - def forward(self, x, keep_prob=0.9): - N, C, T, V, M = x.size() - x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) - x = self.data_bn(x) - x = x.view(N, M, V, C, T).permute( - 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) - - x = self.l1(x, 1.0) - c_new = x.size(1) - x = x.reshape(N, M, c_new, -1) - x = x.mean(3).mean(1) - - return self.fc(x) - -class Model_v2(nn.Module): - def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=2): - super(Model, self).__init__() - if graph is None: - raise ValueError() - else: - Graph = import_class(graph) - self.graph = Graph(**graph_args) - self.graph.num_node = num_point - - A = self.graph.A - self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) - - self.l1 = TCN_GCN_unit(in_channels, 32, A, groups, num_point,block_size, residual=False) - self.l8 = TCN_GCN_unit(32, 32, A, groups,num_point, block_size, stride=2) - self.fc = nn.Linear(32, num_class) - nn.init.normal(self.fc.weight, 0, math.sqrt(2. 
/ num_class)) - bn_init(self.data_bn, 1) - - def forward(self, x, keep_prob=0.9): - N, C, T, V, M = x.size() - x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T) - x = self.data_bn(x) - x = x.view(N, M, V, C, T).permute( - 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) - - x = self.l1(x, 1.0) - x = self.l8(x, keep_prob) - # N*M,C,T,V - c_new = x.size(1) - x = x.reshape(N, M, c_new, -1) - x = x.mean(3).mean(1) - - return self.fc(x) - -class Model_v4(nn.Module): - def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=2): +class Model(nn.Module): + def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_size=41, graph=None, graph_args=dict(), in_channels=2,model_version=1): super(Model, self).__init__() - + self.model_version = model_version if graph is None: raise ValueError() else: @@ -84,8 +15,27 @@ def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_siz A = self.graph.A self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) - self.l1 = TCN_GCN_unit(in_channels, 16, A, groups, num_point,block_size, residual=False) - self.fc = nn.Linear(16, num_class) + + if self.model_version == 1: + self.l1 = TCN_GCN_unit(in_channels, 32, A, groups, num_point,block_size, residual=False) + self.l2 = TCN_GCN_unit(32, 32, A, groups,num_point, block_size, stride=2) + self.fc = nn.Linear(32, num_class) + + if self.model_version == 2: + self.l1 = TCN_GCN_unit(in_channels, 16, A, groups, num_point,block_size, residual=False) + self.l2 = TCN_GCN_unit(16, 32, A, groups,num_point, block_size, stride=2) + self.fc = nn.Linear(32, num_class) + + if self.model_version == 3: + self.l1 = TCN_GCN_unit(in_channels, 32, A, groups, num_point,block_size, residual=False) + self.fc = nn.Linear(32, num_class) + + if self.model_version == 4: + self.l1 = TCN_GCN_unit(in_channels, 16, A, groups, num_point,block_size, residual=False) + self.fc = nn.Linear(16, num_class) + + + nn.init.normal(self.fc.weight, 0, math.sqrt(2. 
/ num_class)) bn_init(self.data_bn, 1) @@ -96,11 +46,21 @@ def forward(self, x, keep_prob=0.9): x = x.view(N, M, V, C, T).permute( 0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V) - x = self.l1(x, 1.0) + if self.model_version == 1: + x = self.l1(x, 1.0) + x = self.l2(x, keep_prob) + if self.model_version == 2: + x = self.l1(x, 1.0) + x = self.l2(x, keep_prob) + if self.model_version == 3: + x = self.l1(x, 1.0) + if self.model_version == 4: + x = self.l1(x, 1.0) # N*M,C,T,V c_new = x.size(1) x = x.reshape(N, M, c_new, -1) x = x.mean(3).mean(1) - return self.fc(x) \ No newline at end of file + return self.fc(x) + \ No newline at end of file diff --git a/SL-GCN/runModelTest_optimizacion_analysis.sh b/SL-GCN/runModelTest_optimizacion_analysis.sh index f64854a..e308528 100644 --- a/SL-GCN/runModelTest_optimizacion_analysis.sh +++ b/SL-GCN/runModelTest_optimizacion_analysis.sh @@ -12,6 +12,7 @@ declare -a points=(29 51 71 29 51 71 29 51 71) declare -a lrs=(0.05 0.05 0.05 0.05 0.05 0.05 0.05 0.05 0.05) declare -a datasets=("AEC" "AEC" "AEC" "PUCP" "PUCP" "PUCP" "WLASL" "WLASL" "WLASL") +declare -a model_version=(0 1 2 3 4 5 6) ''' for i in 0 @@ -25,7 +26,7 @@ do done ''' # for j in 4 5 6 7 8 - +''' for i in 0 do for j in 6 7 8 5 4 3 2 1 0 @@ -35,8 +36,18 @@ do python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1200 --mode_train optimizacion_analysis_v4 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" done done +''' -# optimizacion_analysis_aec_29_v4 model v4 -# optimizacion_analysis_aec_29_v5 model v4 -# optimizacion_analysis_v3 model v3 - +######## get number parameters ############ +for w in 0 1 2 3 4 5 6 # model version +do + for i in 0 # seed + do + for j in 0 1 2 3 4 5 6 7 8 # dataset-keypoint + do + python main.py --seed $i --model_version ${model_version[w]} --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train "optimizacion_analysis_get_ratio_v${model_version[w]}" + python main.py --seed $i --model_version ${model_version[w]} --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train "optimizacion_analysis_get_ratio_v${model_version[w]}" --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --model_version ${model_version[w]} --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train "optimizacion_analysis_get_ratio_v${model_version[w]}" --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + done + done +done \ No newline at end of file From 19890dce3841f0b385eccf26a792e965a6db6aad Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Wed, 9 Nov 2022 17:42:44 +0000 Subject: [PATCH 51/56] model param is not working --- SL-GCN/main.py | 11 ++-- 
SL-GCN/runModelTest_optimizacion_analysis.sh | 54 ++++++++++++++++++-- 2 files changed, 58 insertions(+), 7 deletions(-) diff --git a/SL-GCN/main.py b/SL-GCN/main.py index 90e5018..a4e1004 100644 --- a/SL-GCN/main.py +++ b/SL-GCN/main.py @@ -1078,6 +1078,7 @@ def import_class(name): "mode_train":arg.mode_train, "seed":arg.seed, "id_iteration":id_iteration, + "model_version":arg.model_version, } import wandb import os @@ -1089,7 +1090,7 @@ def import_class(name): print("WANDB API KEY :",os.environ["WANDB_API_KEY"][:5]) if wandbFlag: if arg.user =='cristian': - wandb.init(project="sign_language_project", + wandb.init(project="sign_language_project",#"sign_language_project", entity="ml_projects", config=config) else: @@ -1139,9 +1140,11 @@ def import_class(name): # {arg.model_saved_directory}-{arg.kp_model}-{arg.database}-Lr{str(arg.base_lr)}-NClasses{str(arg.num_class)}-{str(config['num_points'])} #os.makedirs(arg.file_name,exist_ok=True) - - runAndModelName = arg.kp_model + '-' + arg.database +'-'+str(arg.keypoints_number)+ "-Lr" + str(arg.base_lr)+ "-NClas" + str(arg.num_class) + "-Batch" + str(arg.batch_size)+"-Seed"+str(arg.seed)+"-id"+str(id_iteration) - + if arg.user == 'cristian': + runAndModelName = arg.kp_model + '-' + arg.database +'-'+str(arg.keypoints_number)+ "-Lr" + str(arg.base_lr)+ "-m"+str(arg.model_version)+"-NClas" + str(arg.num_class) + "-Batch" + str(arg.batch_size)+"-Seed"+str(arg.seed)+"-id"+str(id_iteration) + else: + + runAndModelName = arg.kp_model + '-' + arg.database +'-'+str(arg.keypoints_number)+ "-Lr" + str(arg.base_lr)+ "-NClas" + str(arg.num_class) + "-Batch" + str(arg.batch_size)+"-Seed"+str(arg.seed)+"-id"+str(id_iteration) model_name = runAndModelName print('model_name : ',model_name) if wandbFlag: diff --git a/SL-GCN/runModelTest_optimizacion_analysis.sh b/SL-GCN/runModelTest_optimizacion_analysis.sh index e308528..2534bb5 100644 --- a/SL-GCN/runModelTest_optimizacion_analysis.sh +++ b/SL-GCN/runModelTest_optimizacion_analysis.sh @@ -9,12 +9,13 @@ #declare -a lrs=(0.05 0.05 0.05) #declare -a datasets=("PUCP" "AEC" "WLASL") + +''' declare -a points=(29 51 71 29 51 71 29 51 71) declare -a lrs=(0.05 0.05 0.05 0.05 0.05 0.05 0.05 0.05 0.05) declare -a datasets=("AEC" "AEC" "AEC" "PUCP" "PUCP" "PUCP" "WLASL" "WLASL" "WLASL") -declare -a model_version=(0 1 2 3 4 5 6) -''' + for i in 0 do for j in 1 2 3 4 5 6 7 8 @@ -37,6 +38,12 @@ do done done ''' +''' + +declare -a points=(29 51 71 29 51 71 29 51 71) +declare -a lrs=(0.05 0.05 0.05 0.05 0.05 0.05 0.05 0.05 0.05) +declare -a datasets=("AEC" "AEC" "AEC" "PUCP" "PUCP" "PUCP" "WLASL" "WLASL" "WLASL") +declare -a model_version=(0 1 2 3 4 5 6) ######## get number parameters ############ for w in 0 1 2 3 4 5 6 # model version @@ -50,4 +57,45 @@ do python main.py --seed $i --model_version ${model_version[w]} --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train "optimizacion_analysis_get_ratio_v${model_version[w]}" --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" done done -done \ No newline at end of file +done + +''' + +''' + +declare -a points=(29 51 71 29 51 71 29 51 71) +declare -a lrs=(0.005 0.005 0.005 0.005 0.005 0.005 0.005 0.005 0.005) +declare -a datasets=("AEC" "AEC" "AEC" "PUCP" "PUCP" "PUCP" "WLASL" "WLASL" "WLASL") +declare -a model_version=(3 3 4 2 2 3 0 
0 1) + + +for i in 0 # seed +do + for j in 6 # dataset-keypoint + do + python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 2000 --mode_train "optimizacion_lr${lrs[j]}_v${model_version[j]}" + python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 2000 --mode_train "optimizacion_lr${lrs[j]}_v${model_version[j]}" --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 2000 --mode_train "optimizacion_lr${lrs[j]}_v${model_version[j]}" --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + done +done ''' + + +declare -a points=(29 51 71 29 51 71 29 51 71) +declare -a lrs=(0.0005 0.0005 0.0005 0.0005 0.0005 0.0005 0.0005 0.0005 0.0005) +declare -a datasets=("AEC" "AEC" "AEC" "PUCP" "PUCP" "PUCP" "WLASL" "WLASL" "WLASL") +declare -a model_version=(3 3 4 2 2 3 0 0 1) + +# wlasl 0.0005 + +# optimizacion_lr${lrs[j]}_v${model_version[j]}_e4000 + +for i in 0 # seed +do + for j in 4 # dataset-keypoint + do + python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i-_lr${lrs[j]}_v${model_version[j]}" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train "modelos_optimizados_test_run2" + python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i-_lr${lrs[j]}_v${model_version[j]}" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train "modelos_optimizados_test_run2" --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i-_lr${lrs[j]}_v${model_version[j]}" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train "modelos_optimizados_test_run2" --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + done +done From 8251350c607b49b8aa4ecf18c964d9fc759cbb9f Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Wed, 9 Nov 2022 14:04:43 -0500 Subject: [PATCH 52/56] local changes --- SL-GCN/model/decouple_gcn_attn.py | 4 ++-- SL-GCN/runModelTest_optimizacion_analysis.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/SL-GCN/model/decouple_gcn_attn.py b/SL-GCN/model/decouple_gcn_attn.py index f79b64d..4779fab 100644 ---
a/SL-GCN/model/decouple_gcn_attn.py +++ b/SL-GCN/model/decouple_gcn_attn.py @@ -233,7 +233,7 @@ def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_siz A = self.graph.A self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point) - self.l1 = TCN_GCN_unit(in_channels, 32, A, groups, num_point, + self.l1 = TCN_GCN_unit(in_channels, 16, A, groups, num_point, block_size, residual=False) #self.l2 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) #self.l3 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size) @@ -241,7 +241,7 @@ def __init__(self, num_class=60, num_point=25, num_person=2, groups=8, block_siz #self.l5 = TCN_GCN_unit(64, 64, A, groups, num_point, block_size, stride=2) #self.l6 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) #self.l7 = TCN_GCN_unit(128, 128, A, groups, num_point, block_size) - self.l8 = TCN_GCN_unit(32, 32, A, groups,num_point, block_size, stride=2) + self.l8 = TCN_GCN_unit(16, 32, A, groups,num_point, block_size, stride=2) #self.l9 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) #self.l10 = TCN_GCN_unit(256, 256, A, groups, num_point, block_size) diff --git a/SL-GCN/runModelTest_optimizacion_analysis.sh b/SL-GCN/runModelTest_optimizacion_analysis.sh index d3af519..1db2c8d 100644 --- a/SL-GCN/runModelTest_optimizacion_analysis.sh +++ b/SL-GCN/runModelTest_optimizacion_analysis.sh @@ -18,7 +18,7 @@ for i in 0 do for j in 0 do - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 2 --mode_train optimizacion_analysis_aec_29_v1 + python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 2 --mode_train optimizacion_analysis_aec_29_v2_local #python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v6_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" #python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train neurips_51points_v6_reduce --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" done From 42c11ff8066d046f5fe4a274e48058eac5cee231 Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Wed, 9 Nov 2022 14:20:06 -0500 Subject: [PATCH 53/56] model versions work correctly --- SL-GCN/config/sign/train/train_joint.yaml | 2 +- SL-GCN/main.py | 7 +- SL-GCN/runModelTest_optimizacion_analysis.sh | 84 +------------------- 3 files changed, 7 insertions(+), 86 deletions(-) diff --git a/SL-GCN/config/sign/train/train_joint.yaml b/SL-GCN/config/sign/train/train_joint.yaml index 97a64c7..621dd51 100644 --- a/SL-GCN/config/sign/train/train_joint.yaml +++ b/SL-GCN/config/sign/train/train_joint.yaml @@ -39,7 +39,7 @@ base_lr: 0.005 step: [] #[50, 100, 150, 200] # To modify the learning rate => lr * 0.1**Sum(x :-> epoch >
step) # training -device: [0, 1] +device: [0] #device: [0] keep_rate: 0.9 only_train_epoch: 1 diff --git a/SL-GCN/main.py b/SL-GCN/main.py index a4e1004..f2df726 100644 --- a/SL-GCN/main.py +++ b/SL-GCN/main.py @@ -1119,11 +1119,8 @@ def import_class(name): arg.num_epoch = config["num-epoch"] arg.kp_model = config["kp-model"] arg.database = config["database"] - - if arg.user =='cristian': - arg.model_saved_directory = "save_models/"+arg.experiment_name+"/" - else: - arg.model_saved_directory = "save_models/"+arg.experiment_name+"/"+now+"/" + + arg.model_saved_directory = "save_models/"+arg.experiment_name+"/"+now+"/" arg.work_dir = "work_dir/"+arg.experiment_name +"/" print('*'*20) diff --git a/SL-GCN/runModelTest_optimizacion_analysis.sh b/SL-GCN/runModelTest_optimizacion_analysis.sh index 2534bb5..913d9ed 100644 --- a/SL-GCN/runModelTest_optimizacion_analysis.sh +++ b/SL-GCN/runModelTest_optimizacion_analysis.sh @@ -5,82 +5,6 @@ #num_class: 28 # AEC=28, PUCP=29 , WLASL=86 #num_point: 29 # 29 or 71 or 51 -#declare -a points=(51 51 51) -#declare -a lrs=(0.05 0.05 0.05) -#declare -a datasets=("PUCP" "AEC" "WLASL") - - -''' -declare -a points=(29 51 71 29 51 71 29 51 71) -declare -a lrs=(0.05 0.05 0.05 0.05 0.05 0.05 0.05 0.05 0.05) -declare -a datasets=("AEC" "AEC" "AEC" "PUCP" "PUCP" "PUCP" "WLASL" "WLASL" "WLASL") - - -for i in 0 -do - for j in 1 2 3 4 5 6 7 8 - do - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 600 --mode_train optimizacion_analysis_aec_29_v5 - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 600 --mode_train optimizacion_analysis_aec_29_v5 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 600 --mode_train optimizacion_analysis_aec_29_v5 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - done -done -''' -# for j in 4 5 6 7 8 -''' -for i in 0 -do - for j in 6 7 8 5 4 3 2 1 0 - do - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1200 --mode_train optimizacion_analysis_v4 - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1200 --mode_train optimizacion_analysis_v4 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - python main.py --seed $i --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1200 --mode_train 
optimizacion_analysis_v4 --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - done -done -''' -''' - -declare -a points=(29 51 71 29 51 71 29 51 71) -declare -a lrs=(0.05 0.05 0.05 0.05 0.05 0.05 0.05 0.05 0.05) -declare -a datasets=("AEC" "AEC" "AEC" "PUCP" "PUCP" "PUCP" "WLASL" "WLASL" "WLASL") -declare -a model_version=(0 1 2 3 4 5 6) - -######## get number parameters ############ -for w in 0 1 2 3 4 5 6 # model version -do - for i in 0 # seed - do - for j in 0 1 2 3 4 5 6 7 8 # dataset-keypoint - do - python main.py --seed $i --model_version ${model_version[w]} --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train "optimizacion_analysis_get_ratio_v${model_version[w]}" - python main.py --seed $i --model_version ${model_version[w]} --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train "optimizacion_analysis_get_ratio_v${model_version[w]}" --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - python main.py --seed $i --model_version ${model_version[w]} --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train "optimizacion_analysis_get_ratio_v${model_version[w]}" --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - done - done -done - -''' - -''' - -declare -a points=(29 51 71 29 51 71 29 51 71) -declare -a lrs=(0.005 0.005 0.005 0.005 0.005 0.005 0.005 0.005 0.005) -declare -a datasets=("AEC" "AEC" "AEC" "PUCP" "PUCP" "PUCP" "WLASL" "WLASL" "WLASL") -declare -a model_version=(3 3 4 2 2 3 0 0 1) - - -for i in 0 # seed -do - for j in 6 # dataset-keypoint - do - python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 2000 --mode_train "optimizacion_lr${lrs[j]}_v${model_version[j]}" - python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 2000 --mode_train "optimizacion_lr${lrs[j]}_v${model_version[j]}" --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 2000 --mode_train "optimizacion_lr${lrs[j]}_v${model_version[j]}" --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - done -done -''' - - declare -a points=(29 
51 71 29 51 71 29 51 71) declare -a lrs=(0.0005 0.0005 0.0005 0.0005 0.0005 0.0005 0.0005 0.0005 0.0005) declare -a datasets=("AEC" "AEC" "AEC" "PUCP" "PUCP" "PUCP" "WLASL" "WLASL" "WLASL") @@ -92,10 +16,10 @@ declare -a model_version=(3 3 4 2 2 3 0 0 1) for i in 0 # seed do - for j in 4 # dataset-keypoint + for j in 0 # dataset-keypoint do - python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i-_lr${lrs[j]}_v${model_version[j]}" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train "modelos_optimizados_test_run2" - python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i-_lr${lrs[j]}_v${model_version[j]}" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train "modelos_optimizados_test_run2" --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i-_lr${lrs[j]}_v${model_version[j]}" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train "modelos_optimizados_test_run2" --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train "modelos_optimizados_test_run2" + python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train "modelos_optimizados_test_run2" --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train "modelos_optimizados_test_run2" --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" done done From 2e8a5f61b901574c23621f02b031b1a39a58b3c3 Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Wed, 9 Nov 2022 19:42:30 +0000 Subject: [PATCH 54/56] optimization of lighter models --- SL-GCN/config/sign/train/train_joint.yaml | 2 +- SL-GCN/runModelTest_optimizacion_analysis.sh | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/SL-GCN/config/sign/train/train_joint.yaml b/SL-GCN/config/sign/train/train_joint.yaml index 621dd51..97a64c7 100644 --- a/SL-GCN/config/sign/train/train_joint.yaml +++ b/SL-GCN/config/sign/train/train_joint.yaml @@ -39,7 +39,7 @@ base_lr: 0.005 step: [] #[50, 100, 150, 200] # To modify the
learning rate => lr * 0.1**Sum(x :-> epoch > step) # training -device: [0] +device: [0, 1] #device: [0] keep_rate: 0.9 only_train_epoch: 1 diff --git a/SL-GCN/runModelTest_optimizacion_analysis.sh b/SL-GCN/runModelTest_optimizacion_analysis.sh index 913d9ed..4a28554 100644 --- a/SL-GCN/runModelTest_optimizacion_analysis.sh +++ b/SL-GCN/runModelTest_optimizacion_analysis.sh @@ -8,7 +8,8 @@ declare -a points=(29 51 71 29 51 71 29 51 71) declare -a lrs=(0.0005 0.0005 0.0005 0.0005 0.0005 0.0005 0.0005 0.0005 0.0005) declare -a datasets=("AEC" "AEC" "AEC" "PUCP" "PUCP" "PUCP" "WLASL" "WLASL" "WLASL") -declare -a model_version=(3 3 4 2 2 3 0 0 1) +declare -a model_version=(3 4 5 3 3 4 1 1 2) +#declare -a model_version=(3 3 4 2 2 3 0 0 1) # wlasl 0.0005 @@ -16,10 +17,10 @@ for i in 0 # seed do - for j in 0 # dataset-keypoint + for j in 4 5 6 7 8 0 1 2 3 # dataset-keypoint do - python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train "modelos_optimizados_test_run2" - python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train "modelos_optimizados_test_run2" --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 1 --mode_train "modelos_optimizados_test_run2" --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 2500 --mode_train "modelos_optimizados" + python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 2500 --mode_train "modelos_optimizados" --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 2500 --mode_train "modelos_optimizados" --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" done done From cd8a20e189707dd45d72287f68a0063b7b9c34e4 Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Thu, 10 Nov 2022 02:18:26 +0000 Subject: [PATCH 55/56] model optimization --- SL-GCN/main.py | 7
+++++-- SL-GCN/runModelTest_optimizacion_analysis.sh | 12 +++++++----- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/SL-GCN/main.py b/SL-GCN/main.py index f2df726..8c6e6fa 100644 --- a/SL-GCN/main.py +++ b/SL-GCN/main.py @@ -1119,8 +1119,11 @@ def import_class(name): arg.num_epoch = config["num-epoch"] arg.kp_model = config["kp-model"] arg.database = config["database"] - - arg.model_saved_directory = "save_models/"+arg.experiment_name+"/"+now+"/" + + if arg.user == "cristian": + arg.model_saved_directory = "save_models/"+arg.experiment_name+"/" + else: + arg.model_saved_directory = "save_models/"+arg.experiment_name+"/"+now+"/" arg.work_dir = "work_dir/"+arg.experiment_name +"/" print('*'*20) diff --git a/SL-GCN/runModelTest_optimizacion_analysis.sh b/SL-GCN/runModelTest_optimizacion_analysis.sh index 4a28554..2e93d38 100644 --- a/SL-GCN/runModelTest_optimizacion_analysis.sh +++ b/SL-GCN/runModelTest_optimizacion_analysis.sh @@ -14,13 +14,15 @@ declare -a model_version=(3 4 5 3 3 4 1 1 2) # wlasl 0.0005 # optimizacion_lr${lrs[j]}_v${model_version[j]}_e4000 +# 0 1 2 3 4 5 6 7 8 +# 29 51 71 29 51 71 29 51 71 for i in 0 # seed do - for j in 4 5 6 7 8 0 1 2 3# dataset-keypoint + for j in 5 8 0 2 # dataset-keypoint do - python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 2500 --mode_train "modelos_optimizados" - python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 2500 --mode_train "modelos_optimizados" --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" - python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 2500 --mode_train "modelos_optimizados" --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model wholepose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 3000 --mode_train "modelos_optimizados" + python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/mediapipe-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model mediapipe --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 3000 --mode_train "modelos_optimizados" --weights "save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" + python main.py --seed $i --model_version ${model_version[j]} --experiment_name "results/${points[j]}/${datasets[j]}/openpose-${datasets[j]}-s-$i" --database ${datasets[j]} --keypoints_model openpose --base_lr ${lrs[j]} --keypoints_number ${points[j]} --num_epoch 3000 --mode_train "modelos_optimizados" --weights 
"save_models/results/${points[j]}/${datasets[j]}/wholepose-${datasets[j]}-s-$i/wholepose-${datasets[j]}-${points[j]}-$i-init.pt" done -done +done From db890c8a9e2e5db652667f96a968fc2bfb5b99e9 Mon Sep 17 00:00:00 2001 From: CristianLazoQuispe Date: Thu, 10 Nov 2022 02:20:59 +0000 Subject: [PATCH 56/56] model optmizacion --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 4e8fc2a..08e79e3 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ +*.log +*.pt *.env data/ save_models/