# Old calls, use versions with -p nomystery... if redoing
# Template invocation: ROOT / REGEX / EXECUTABLE / "EXECUTABLE ARGUMENTS" /
# SEARCH_ARGS are placeholders, not literal values.
# NOTE(review): everything after --sample appears to be forwarded verbatim as
# search arguments (SEARCH_ARGS) — which would explain why later calls repeat
# a bare "-p" after --sample — TODO confirm against fast-sample.py's parser.
./fast-sample.py --traverse ROOT -d REGEX -p REGEX -e EXECUTABLE -a "EXECUTABLE ARGUMENTS" --array --dry --sample SEARCH_ARGS

# Sample INITial states for every task under FixedWorlds/opt, submitting one
# Slurm array job per domain via sbatch; results are gzip-compressed with
# suffix ".uniform.data.gz". --dry presumably only prints the sbatch calls
# instead of submitting them — remove it to actually run.
./fast-sample.py --traverse ../DeePDown/data/FixedWorlds/opt/ -e sbatch -a "--export=ALL ./misc/slurm/slurm-sample.sh" --array --dry --sample -p -o "gzip(suffix=.uniform.data.gz)" --state-select INIT

# Same as the INIT call above but sampling INTERmediate states.
# NOTE(review): suffix "inter_uniform.data.gz" is missing the leading '.' that
# every sibling call uses — likely a typo; the next call in this file looks
# like the corrected rerun with ".inter_uniform.data.gz".
./fast-sample.py --traverse ../DeePDown/data/FixedWorlds/opt/ -e sbatch -a "--export=ALL ./misc/slurm/slurm-sample.sh" --array --dry --sample -p -o "gzip(suffix=inter_uniform.data.gz)" --state-select INTER

# INTERmediate-state sampling with the correct ".inter_uniform.data.gz" suffix
# (corrected rerun of the previous call, whose suffix lacked the leading dot).
./fast-sample.py --traverse ../DeePDown/data/FixedWorlds/opt/ -e sbatch -a "--export=ALL ./misc/slurm/slurm-sample.sh" --array --dry --sample -p -o "gzip(suffix=.inter_uniform.data.gz)" --state-select INTER

# Satisficing INTER sampling. The first -p is a negative-lookahead regex that
# skips any path with a "nomystery_fix" component. The second, bare "-p" after
# --sample is presumably forwarded as a search argument, not a repeated path
# filter — TODO confirm (see the template at the top of this file).
./fast-sample.py --traverse ../DeePDown/data/FixedWorlds/opt/ -p "^((?!nomystery_fix)[^/]*/)+((?!nomystery_fix)[^/]*)$" -e sbatch -a "--export=ALL ./misc/slurm/slurm-sample.sh" --array --dry --sample -p -o "gzip(suffix=.inter_uniform.sat.data.gz)" --satisficing --state-select INTER

# Satisficing INIT sampling, same nomystery_fix exclusion as above (regex in
# single quotes here; both quoting styles pass the pattern through unchanged).
./fast-sample.py --traverse ../DeePDown/data/FixedWorlds/opt/ -p '^((?!nomystery_fix)[^/]*/)+((?!nomystery_fix)[^/]*)$' -e sbatch -a "--export=ALL ./misc/slurm/slurm-sample.sh" --array --dry --sample -p -o "gzip(suffix=.uniform.sat.data.gz)" --satisficing  --state-select INIT

# Satisficing INIT sampling on transport_var_roads only, with auto-detected
# problem generator(s) ("--generator AUTO AUTO"). Output file name is derived
# from the first problem; "{FirstProblem:-5}" presumably strips the last five
# characters (e.g. a ".pddl" extension) — TODO confirm the template syntax.
./fast-sample.py --traverse ../DeePDown/data/FixedWorlds/opt/transport_var_roads -e sbatch -a "--export=ALL ./misc/slurm/slurm-sample.sh" --array --dry --sample -p -o "gzip(file={FirstProblem:-5}.generator.sat.data.gz)" --satisficing --state-select INIT --generator AUTO AUTO 

# Same generator-based sampling as above, but optimal (no --satisficing);
# output suffix ".generator.opt.data.gz" reflects that.
./fast-sample.py --traverse ../DeePDown/data/FixedWorlds/opt/transport_var_roads -e sbatch -a "--export=ALL ./misc/slurm/slurm-sample.sh" --array --dry --sample -p -o "gzip(file={FirstProblem:-5}.generator.opt.data.gz)" --state-select INIT --generator AUTO AUTO 

# BUG (found): this call writes ".inter_uniform.sat.data.gz" output and adds
# --skip-exists so already-sampled tasks are not resubmitted, but — unlike the
# otherwise identical satisficing INTER call above — it was MISSING
# "--state-select INTER", so it sampled the tool's default state selection
# while labeling the files as intermediate-state data.
# Fixed by appending --state-select INTER to match the output suffix.
./fast-sample.py --traverse ../DeePDown/data/FixedWorlds/opt/ -p "^((?!nomystery_fix)[^/]*/)+((?!nomystery_fix)[^/]*)$" -e sbatch -a "--export=ALL ./misc/slurm/slurm-sample.sh" --array --skip-exists .inter_uniform.sat.data.gz --dry --sample -p -o "gzip(suffix=.inter_uniform.sat.data.gz)" --satisficing --state-select INTER

# Local test network
# Local (non-Slurm) smoke test of training on a single task directory
# (transport_var_roads/c10_t2_p2): 5-hidden-layer MLP, MSE loss, adam, with
# checkpointing, progress checking, early stopping, restarts, and a 24h
# (86400 s) stop timer. --dry + --skip: presumably prints/plans the run
# without training — remove to execute. Outputs model in hdf5 + protobuf
# plus a graphdef.txt.
./fast-training.py "keras_dyn_mlp(tparams=ktparams(epochs=1000,loss=mean_squared_error,batch=100,balance=False,optimizer=adam,callbacks=[keras_model_checkpoint(val_loss,./checkpoint.ckp),keras_progress_checking(acc,100,0.001,False),keras_early_stopping(val_loss,0.01,100),keras_restart(-1,stop_successful=True),keras_stoptimer(max_time=86400,per_training=False,prevent_reinit=True,timeout_as_failure=True)]),hidden=5,output_units=-1,dropout=0,x_fields=[current_state,goals],y_fields=[hplan],formats=[hdf5,protobuf],graphdef=graphdef.txt,count_samples=True)" --prefix tmp_ -d ../DeePDown/data/FixedWorlds/opt/transport_var_roads/c10_t2_p2/ --input "gzip(suffix=.uniform.data.gz)" -o -n model --fields goals hplan current_state --skip --dry

## NETWORKS
# MLP 5 hidden layers
# Cluster training: 5-hidden-layer MLP over all of FixedWorlds/opt with
# 10-fold cross-validation, submitted through Slurm (--slurm + sbatch args).
# Trains on the ".uniform.data.gz" samples produced above; skips runs whose
# output exists or that were previously trained. --dry: plan only.
./fast-training.py "keras_dyn_mlp(hidden=5,output_units=-1,x_fields=[current_state,goals],y_fields=hplan,formats=[hdf5,protobuf],graphdef=graphdef.txt,count_samples=True,callbacks=[keras_model_checkpoint(val_loss,./checkpoint.ckp),keras_progress_checking(acc,20,0.001,-1,False),keras_early_stopping(val_loss,0.01,100)])" --prefix uniform_mlp_h5_ -d ../DeePDown/data/FixedWorlds/opt -sdt --slurm --cross-validation 10 -a "--export=ALL ./misc/slurm/slurm-training.sh" --input "gzip(suffix=.uniform.data.gz)" -o -n model --fields current_state goals hplan --skip --skip-if-previously-trained --dry


# Same 5-hidden-layer training, but with "--format NonStatic_A_01 -dp" and
# the "uniform_ns_" prefix — presumably a non-static input encoding variant
# of the run above — TODO confirm what NonStatic_A_01/-dp select.
./fast-training.py "keras_dyn_mlp(hidden=5,output_units=-1,x_fields=[current_state,goals],y_fields=hplan,formats=[hdf5,protobuf],graphdef=graphdef.txt,count_samples=True,callbacks=[keras_model_checkpoint(val_loss,./checkpoint.ckp),keras_progress_checking(acc,20,0.001,-1,False),keras_early_stopping(val_loss,0.01,100)])" --prefix uniform_ns_mlp_h5_ -d ../DeePDown/data/FixedWorlds/opt -sdt --slurm --cross-validation 10 -a "--export=ALL ./misc/slurm/slurm-training.sh" --input "gzip(suffix=.uniform.data.gz)" -o -n model --fields current_state goals hplan --skip --format NonStatic_A_01 -dp --skip-if-previously-trained --dry



# MLP 15 hidden layers
# Identical to the 5-hidden-layer cluster run above except hidden=15 and the
# "uniform_mlp_h15_" prefix.
./fast-training.py "keras_dyn_mlp(hidden=15,output_units=-1,x_fields=[current_state,goals],y_fields=hplan,formats=[hdf5,protobuf],graphdef=graphdef.txt,count_samples=True,callbacks=[keras_model_checkpoint(val_loss,./checkpoint.ckp),keras_progress_checking(acc,20,0.001,-1,False),keras_early_stopping(val_loss,0.01,100)])" --prefix uniform_mlp_h15_ -d ../DeePDown/data/FixedWorlds/opt -sdt --slurm --cross-validation 10 -a "--export=ALL ./misc/slurm/slurm-training.sh" --input "gzip(suffix=.uniform.data.gz)" -o -n model --fields current_state goals hplan --skip --skip-if-previously-trained --dry


# 15-hidden-layer variant with "--format NonStatic_A_01 -dp" (mirrors the
# 5-hidden-layer NonStatic run earlier in this file).
./fast-training.py "keras_dyn_mlp(hidden=15,output_units=-1,x_fields=[current_state,goals],y_fields=hplan,formats=[hdf5,protobuf],graphdef=graphdef.txt,count_samples=True,callbacks=[keras_model_checkpoint(val_loss,./checkpoint.ckp),keras_progress_checking(acc,20,0.001,-1,False),keras_early_stopping(val_loss,0.01,100)])" --prefix uniform_ns_mlp_h15_ -d ../DeePDown/data/FixedWorlds/opt -sdt --slurm --cross-validation 10 -a "--export=ALL ./misc/slurm/slurm-training.sh" --input "gzip(suffix=.uniform.data.gz)" -o -n model --fields current_state goals hplan --skip --format NonStatic_A_01 -dp --skip-if-previously-trained --dry






# Call for Neural Network in FD
# TODO: consider a dual-queue setup?
# output_layers: use the final layer's ACTIVATION node, or look up the name of
# the input feeding the "loss/..." layer.
# --pb-network eager_greedy([nh(sgnet(path={PDIR}prefix{FOLD}_model.pb,type=regression,state_layer=input_1,goal_layer=input_2,output_layers=[dense_6/Relu],atoms={PDDL_ATOMS},defaults={PDDL_INITS}))], transform=adapt_costs())
# --pb-network eager_greedy([nh(sgnet(path={PDIR}reg_full_ubal_h5_sigmoid_init_drp0_{FOLD}_model.pb,type=regression,state_layer=input_1,goal_layer=input_2,output_layers=[dense_6/Relu],atoms={PDDL_ATOMS},defaults={PDDL_INITS}))], transform=adapt_costs())
#? eager_greedy([nh(sgnet(path=?,type=regression,state_layer=input_1,goal_layer=input_2,output_layers=[?],atoms=[],defaults=[]))], transform=adapt_costs())
