config.py
import torch


class Args:
    """Training configuration for fine-tuning DialoGPT-small on few-shot dialogue data."""

    def __init__(self):
        # Data, model, and cache paths
        self.input_dir = 'data/save/few_shot.pickle'
        self.output_dir = 'output-small'
        self.model_type = 'gpt2'
        self.model_name_or_path = 'microsoft/DialoGPT-small'
        self.config_name = 'microsoft/DialoGPT-small'
        self.tokenizer_name = 'microsoft/DialoGPT-small'
        self.cache_dir = 'cached'
        self.block_size = 512  # maximum input length in tokens

        # Training and evaluation hyperparameters
        self.do_eval = True
        self.per_gpu_train_batch_size = 4
        self.per_gpu_eval_batch_size = 4
        self.gradient_accumulation_steps = 1
        self.learning_rate = 5e-5
        self.weight_decay = 0.0
        self.adam_epsilon = 1e-8
        self.max_grad_norm = 1.0
        self.num_train_epochs = 3
        self.max_steps = -1  # if > 0, overrides num_train_epochs
        self.warmup_steps = 0

        # Logging and checkpointing
        self.logging_steps = 1000
        self.save_steps = 3500
        self.save_total_limit = None
        self.eval_all_checkpoints = False

        # Hardware, caching, and precision
        self.no_cuda = False
        self.overwrite_output_dir = True
        self.overwrite_cache = True
        self.should_continue = False  # resume from the latest checkpoint in output_dir
        self.local_rank = -1  # -1 disables distributed training
        self.fp16 = False
        self.fp16_opt_level = 'O1'  # Apex AMP optimization level
        # Fall back to CPU when CUDA is unavailable or explicitly disabled
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() and not self.no_cuda else "cpu")
        # self.n_gpu = torch.cuda.device_count()
        self.n_gpu = 1

        # NLI model used for entailment/contradiction scoring
        self.nli = {"model_card": "tals/albert-xlarge-vitaminc-mnli",
                    "entailment_idx": 0, "contradiction_idx": 1}

        self.do_train = True
        self.seed = 4
        self.evaluate_during_training = False  # Model-Agnostic Meta-Learning (MAML)
        self.constractive = True  # Contrastive Learning
        self.constractive_lambada = 0.1  # weight of the contrastive loss term
        self.zero_shot = False
        self.use_prompts = False
        self.oracle = False
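

# Hypothetical usage sketch: a minimal example of how Args might be consumed
# with Hugging Face transformers. AutoTokenizer/AutoModelForCausalLM, AdamW,
# and get_linear_schedule_with_warmup are standard transformers/torch APIs,
# but this wiring is an assumption, not the repository's actual training loop;
# `total_steps` is a placeholder that would normally be derived from the
# training dataloader length.
from torch.optim import AdamW
from transformers import (AutoModelForCausalLM, AutoTokenizer,
                          get_linear_schedule_with_warmup)

args = Args()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, cache_dir=args.cache_dir)
model = AutoModelForCausalLM.from_pretrained(args.model_name_or_path,
                                             cache_dir=args.cache_dir).to(args.device)
optimizer = AdamW(model.parameters(), lr=args.learning_rate,
                  eps=args.adam_epsilon, weight_decay=args.weight_decay)
total_steps = args.max_steps if args.max_steps > 0 else 1000  # placeholder step count
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=total_steps)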
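

# Hypothetical helper showing how the `nli` entry could be consumed to score a
# premise/hypothesis pair with the VitaminC-MNLI model it names. The function
# name `score_entailment` and the call pattern are assumptions; only the model
# card and label indices come from the config above.
from transformers import AutoModelForSequenceClassification


def score_entailment(args, premise, hypothesis):
    nli_tokenizer = AutoTokenizer.from_pretrained(args.nli["model_card"])
    nli_model = AutoModelForSequenceClassification.from_pretrained(args.nli["model_card"])
    inputs = nli_tokenizer(premise, hypothesis, return_tensors="pt", truncation=True)
    with torch.no_grad():
        probs = torch.softmax(nli_model(**inputs).logits, dim=-1)[0]
    # Read off the label indices declared in the config
    return (probs[args.nli["entailment_idx"]].item(),
            probs[args.nli["contradiction_idx"]].item())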