# @package _global_
#
# to execute this experiment run:
# python train.py experiment=sigmoid_hypsol_dyn_gfn
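#
# a hedged usage sketch (assuming standard Hydra CLI syntax and that the
# experiment name matches this file name): any field below can be overridden
# from the command line, and -m launches a multirun sweep, e.g. over the
# seeds listed further down:
#   python train.py experiment=sigmoid_hypsol_dyn_gfn model.lr=1e-4
#   python train.py -m experiment=sigmoid_hypsol_dyn_gfn seed=13,29,42,73,91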
defaults:
  - override /model: per_node_hyper_tcg # alternative: hyper_tcg
  - override /datamodule: linear_unidentifiable_velocity # alternative: linear_velocity
  - override /logger:
      - csv
      - wandb
  - override /trainer: gpu
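# note: each "override /<group>" entry above swaps the named config into that
# Hydra config group (model, datamodule, logger, trainer); the trailing
# "alternative:" comments record other configs this experiment has been run with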
name: "hyper_per_node_sigmoid_tcg_gfn"
seed: 0
# seeds used for the experiments: 13, 29, 42, 73, 91
datamodule:
  batch_size: 100 # alternative: 500
  T: 2
  p: 20
  vars_to_deidentify: [0, 1, 2]
  sparsity: 0.9 # 0.9 --> 1024 nodes for p=20 and vars [0, 1, 2]
  system: "sigmoid_linear"
  sigma: 0
  burn_in: 1
  seed: 13
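# note (assumption based on field names): the datamodule appears to simulate a
# p=20-variable "sigmoid_linear" dynamical system for T=2 steps with observation
# noise sigma=0; vars_to_deidentify presumably selects the variables whose
# regulatory structure is made unidentifiable, yielding the admissible-graph
# count referenced in the sparsity comment above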
# best-performing model settings
model:
  env_batch_size: 256
  eval_batch_size: 2500
  full_posterior_eval: False
  uniform_backwards: True
  debug_use_shd_energy: False
  analytic_use_simple_mse_energy: False
  per_node_gfn: True
  loss_fn: "detailed_balance"
  n_steps: 0
  arch: "mlp"
  alpha: 0
  temperature: 0.005
  temper_period: 5
  prior_lambda: 150 # alternatives tried: 300, 10
  beta: 0.01
  confidence: 0.0
  hidden_dim: 128
  gfn_freq: 5 # may depend on batch size
  energy_freq: 5 # may depend on batch size
  load_pretrain: True
  pretraining_epochs: 0
  lr: 1e-5
  hyper: "per_node_mlp"
  hyper_hidden_dim: [64, 64, 64]
  bias: True
  path: "/h/lazar/dyn-gfn/logs/experiments/models/per_node_sigmoid_tcg/last.ckpt"
trainer:
  max_epochs: 1000
  min_epochs: 1000
  check_val_every_n_epoch: 5
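# with max_epochs == min_epochs, the PyTorch Lightning trainer runs exactly
# 1000 epochs, validating every 5 epochs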
logger:
  wandb:
    tags: ["hyper", "sigmoid", "per-node", "gfn", "${name}", "v_hyp_fix"]
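# "${name}" is an OmegaConf interpolation that resolves to the top-level
# `name` field, so each run is tagged with the experiment name in wandb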