diff --git a/configs/callbacks/csv_prediction_writer.yaml b/configs/callbacks/csv_prediction_writer.yaml deleted file mode 100644 index b7fc6fb93940f0910938553caec31c02a3aeb60b..0000000000000000000000000000000000000000 --- a/configs/callbacks/csv_prediction_writer.yaml +++ /dev/null @@ -1,4 +0,0 @@ -csv_prediction_writer: - _target_: deepscreen.utils.lightning.CSVPredictionWriter - output_dir: ${paths.output_dir} - write_interval: batch diff --git a/configs/callbacks/default.yaml b/configs/callbacks/default.yaml deleted file mode 100644 index f94c639e898f22416a21af826e6453b99fbf01f1..0000000000000000000000000000000000000000 --- a/configs/callbacks/default.yaml +++ /dev/null @@ -1,5 +0,0 @@ -defaults: - - model_checkpoint - - early_stopping - - model_summary - - rich_progress_bar diff --git a/configs/callbacks/early_stopping.yaml b/configs/callbacks/early_stopping.yaml deleted file mode 100644 index 1d2bb37ad3f056b1fb6cff81f162f7222a3c5a3e..0000000000000000000000000000000000000000 --- a/configs/callbacks/early_stopping.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# https://pytorch-lightning.readthedocs.io/en/latest/api/pytorch_lightning.callbacks.EarlyStopping.html - -# Monitor a metric and stop training when it stops improving. -# Look at the above link for more detailed information. -early_stopping: - _target_: lightning.pytorch.callbacks.EarlyStopping - monitor: ${oc.select:callbacks.model_checkpoint.monitor,"val/loss"} # quantity to be monitored, must be specified!!! - min_delta: 0. # minimum change in the monitored quantity to qualify as an improvement - patience: 50 # number of checks with no improvement after which training will be stopped - verbose: False # verbosity mode - mode: ${callbacks.model_checkpoint.mode} # "max" means higher metric value is better, can be also "min" - strict: True # whether to crash the training if monitor is not found in the validation metrics - check_finite: True # when set True, stops training when the monitor becomes NaN or infinite - stopping_threshold: null # stop training immediately once the monitored quantity reaches this threshold - divergence_threshold: null # stop training as soon as the monitored quantity becomes worse than this threshold - check_on_train_epoch_end: False # whether to run early stopping at the end of the training epoch - log_rank_zero_only: False # logs the status of the early stopping callback only for rank 0 process diff --git a/configs/callbacks/inference.yaml b/configs/callbacks/inference.yaml deleted file mode 100644 index 22fe5db38092387f72718e3046e86f57663e8c00..0000000000000000000000000000000000000000 --- a/configs/callbacks/inference.yaml +++ /dev/null @@ -1,6 +0,0 @@ -defaults: - - model_summary - - rich_progress_bar - -model_summary: - max_depth: 2 diff --git a/configs/callbacks/model_checkpoint.yaml b/configs/callbacks/model_checkpoint.yaml deleted file mode 100644 index f8e51b2bb6036ce4a0b1ce3dc47c2869514b2eeb..0000000000000000000000000000000000000000 --- a/configs/callbacks/model_checkpoint.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# https://pytorch-lightning.readthedocs.io/en/latest/api/pytorch_lightning.callbacks.ModelCheckpoint.html - -# Save the model periodically by monitoring a quantity. -# Look at the above link for more detailed information. 
-model_checkpoint: - _target_: lightning.pytorch.callbacks.ModelCheckpoint - dirpath: ${paths.output_dir} # directory to save the model file - filename: "checkpoints/epoch_{epoch:03d}" # checkpoint filename - monitor: ${eval:'"val/loss" if ${data.train_val_test_split}[1] else "train/loss"'} # name of the logged metric which determines when model is improving - verbose: False # verbosity mode - save_last: True # additionally always save an exact copy of the last checkpoint to a file last.ckpt - save_top_k: 1 # save k best models (determined by above metric) - mode: "min" # "max" means higher metric value is better, can be also "min" - auto_insert_metric_name: False # when True, the checkpoints filenames will contain the metric name - save_weights_only: False # if True, then only the model’s weights will be saved - every_n_train_steps: null # number of training steps between checkpoints - train_time_interval: null # checkpoints are monitored at the specified time interval - every_n_epochs: null # number of epochs between checkpoints - save_on_train_epoch_end: null # whether to run checkpointing at the end of the training epoch or the end of validation diff --git a/configs/callbacks/model_summary.yaml b/configs/callbacks/model_summary.yaml deleted file mode 100644 index f854fd9fd17863a8c5930f482dc0d235cfc03698..0000000000000000000000000000000000000000 --- a/configs/callbacks/model_summary.yaml +++ /dev/null @@ -1,7 +0,0 @@ -# https://pytorch-lightning.readthedocs.io/en/latest/api/pytorch_lightning.callbacks.RichModelSummary.html - -# Generates a summary of all layers in a LightningModule with rich text formatting. -# Look at the above link for more detailed information. -model_summary: - _target_: lightning.pytorch.callbacks.RichModelSummary - max_depth: 2 # The maximum depth of layer nesting that the summary will include. `-1` for all modules `0` for none. diff --git a/configs/callbacks/none.yaml b/configs/callbacks/none.yaml deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/configs/callbacks/rich_progress_bar.yaml b/configs/callbacks/rich_progress_bar.yaml deleted file mode 100644 index 82d2f89d28be7511f2360e1b047656afeee16141..0000000000000000000000000000000000000000 --- a/configs/callbacks/rich_progress_bar.yaml +++ /dev/null @@ -1,6 +0,0 @@ -# https://pytorch-lightning.readthedocs.io/en/latest/api/pytorch_lightning.callbacks.RichProgressBar.html - -# Create a progress bar with rich text formatting. -# Look at the above link for more detailed information. 
-rich_progress_bar: - _target_: lightning.pytorch.callbacks.RichProgressBar diff --git a/configs/data/collator/default.yaml b/configs/data/collator/default.yaml deleted file mode 100644 index 514c0837c358aeb82d22ec2c34a7226d05580423..0000000000000000000000000000000000000000 --- a/configs/data/collator/default.yaml +++ /dev/null @@ -1,5 +0,0 @@ -_target_: deepscreen.data.utils.collator.collate_fn -_partial_: true - -automatic_padding: false -padding_value: 0.0 diff --git a/configs/data/collator/none.yaml b/configs/data/collator/none.yaml deleted file mode 100644 index 29899b9fc6b0164c149e17b31e350be9135add49..0000000000000000000000000000000000000000 --- a/configs/data/collator/none.yaml +++ /dev/null @@ -1,2 +0,0 @@ -_target_: deepscreen.utils.passthrough -_partial_: true \ No newline at end of file diff --git a/configs/data/drug_featurizer/ecfp.yaml b/configs/data/drug_featurizer/ecfp.yaml deleted file mode 100644 index a341607f3f242e6f993bf758e7341b376f8b4ef3..0000000000000000000000000000000000000000 --- a/configs/data/drug_featurizer/ecfp.yaml +++ /dev/null @@ -1,6 +0,0 @@ -_target_: deepscreen.data.featurizers.fingerprint.smiles_to_fingerprint -_partial_: true - -fingerprint: MorganFP -nBits: 1024 -radius: 2 \ No newline at end of file diff --git a/configs/data/drug_featurizer/fcs.yaml b/configs/data/drug_featurizer/fcs.yaml deleted file mode 100644 index 77a0cdbec7168829ce846fa28543707a000d0c0d..0000000000000000000000000000000000000000 --- a/configs/data/drug_featurizer/fcs.yaml +++ /dev/null @@ -1,4 +0,0 @@ -_target_: deepscreen.data.featurizers.fcs.drug_to_embedding -_partial_: true - -max_sequence_length: 205 \ No newline at end of file diff --git a/configs/data/drug_featurizer/graph.yaml b/configs/data/drug_featurizer/graph.yaml deleted file mode 100644 index d3d65a1fbec937c9484584d8f9f89bbc999539f9..0000000000000000000000000000000000000000 --- a/configs/data/drug_featurizer/graph.yaml +++ /dev/null @@ -1,2 +0,0 @@ -_target_: deepscreen.data.featurizers.graph.smiles_to_graph -_partial_: true \ No newline at end of file diff --git a/configs/data/drug_featurizer/label.yaml b/configs/data/drug_featurizer/label.yaml deleted file mode 100644 index 80ffe2fbdf7038953a8d94e4fa9def36196d66bb..0000000000000000000000000000000000000000 --- a/configs/data/drug_featurizer/label.yaml +++ /dev/null @@ -1,15 +0,0 @@ -#_target_: deepscreen.data.featurizers.categorical.smiles_to_label -#_partial_: true -# -#max_sequence_length: 100 -##in_channels: 63 - -_target_: deepscreen.data.featurizers.categorical.sequence_to_label -_partial_: true -charset: ['#', '%', ')', '(', '+', '-', '.', '1', '0', '3', '2', '5', '4', - '7', '6', '9', '8', '=', 'A', 'C', 'B', 'E', 'D', 'G', 'F', 'I', - 'H', 'K', 'M', 'L', 'O', 'N', 'P', 'S', 'R', 'U', 'T', 'W', 'V', - 'Y', '[', 'Z', ']', '_', 'a', 'c', 'b', 'e', 'd', 'g', 'f', 'i', - 'h', 'm', 'l', 'o', 'n', 's', 'r', 'u', 't', 'y'] - -max_sequence_length: 100 \ No newline at end of file diff --git a/configs/data/drug_featurizer/mol_features.yaml b/configs/data/drug_featurizer/mol_features.yaml deleted file mode 100644 index b0652eace74a5041d7bfa166152e347b9f009c64..0000000000000000000000000000000000000000 --- a/configs/data/drug_featurizer/mol_features.yaml +++ /dev/null @@ -1,4 +0,0 @@ -_target_: deepscreen.data.featurizers.graph.smiles_to_mol_features -_partial_: true - -num_atom_feat: 34 diff --git a/configs/data/drug_featurizer/none.yaml b/configs/data/drug_featurizer/none.yaml deleted file mode 100644 index 
29899b9fc6b0164c149e17b31e350be9135add49..0000000000000000000000000000000000000000 --- a/configs/data/drug_featurizer/none.yaml +++ /dev/null @@ -1,2 +0,0 @@ -_target_: deepscreen.utils.passthrough -_partial_: true \ No newline at end of file diff --git a/configs/data/drug_featurizer/onehot.yaml b/configs/data/drug_featurizer/onehot.yaml deleted file mode 100644 index 827114b5c04984b2bbd78697dda712a109db3514..0000000000000000000000000000000000000000 --- a/configs/data/drug_featurizer/onehot.yaml +++ /dev/null @@ -1,15 +0,0 @@ -#_target_: deepscreen.data.featurizers.categorical.smiles_to_onehot -#_partial_: true -# -#max_sequence_length: 100 -##in_channels: 63 - -_target_: deepscreen.data.featurizers.categorical.sequence_to_onehot -_partial_: true -charset: ['#', '%', ')', '(', '+', '-', '.', '1', '0', '3', '2', '5', '4', - '7', '6', '9', '8', '=', 'A', 'C', 'B', 'E', 'D', 'G', 'F', 'I', - 'H', 'K', 'M', 'L', 'O', 'N', 'P', 'S', 'R', 'U', 'T', 'W', 'V', - 'Y', '[', 'Z', ']', '_', 'a', 'c', 'b', 'e', 'd', 'g', 'f', 'i', - 'h', 'm', 'l', 'o', 'n', 's', 'r', 'u', 't', 'y'] - -max_sequence_length: 100 \ No newline at end of file diff --git a/configs/data/drug_featurizer/tokenizer.yaml b/configs/data/drug_featurizer/tokenizer.yaml deleted file mode 100644 index 369aa42da9f2a5f3990dff0097afd0fe8cf5da55..0000000000000000000000000000000000000000 --- a/configs/data/drug_featurizer/tokenizer.yaml +++ /dev/null @@ -1,6 +0,0 @@ -_target_: deepscreen.data.featurizers.token.sequence_to_token_ids -_partial_: true - -tokenizer: - _target_: deepscreen.data.featurizers.token.SmilesTokenizer - vocab_file: resources/vocabs/smiles.txt diff --git a/configs/data/dti.yaml.bak b/configs/data/dti.yaml.bak deleted file mode 100644 index 93682ed7b88a14ac5e7afecb4327aa50d51d29b8..0000000000000000000000000000000000000000 --- a/configs/data/dti.yaml.bak +++ /dev/null @@ -1,21 +0,0 @@ -_target_: deepscreen.data.dti_datamodule.DTIdatamodule - -defaults: - - _self_ - - split: null - - drug_featurizer: null - - protein_featurizer: null - -task: ${task.task} -n_class: ${oc.select:task.task.n_class,null} - -data_dir: ${paths.data_dir} -dataset_name: null - -batch_size: 16 -train_val_test_split: [0.7, 0.1, 0.2] - -num_workers: 0 -pin_memory: false - -train: ${train} \ No newline at end of file diff --git a/configs/data/dti_data.yaml b/configs/data/dti_data.yaml deleted file mode 100644 index 5b65659c15f8fac1a229e16586ffb226293b4f14..0000000000000000000000000000000000000000 --- a/configs/data/dti_data.yaml +++ /dev/null @@ -1,20 +0,0 @@ -_target_: deepscreen.data.dti.DTIDataModule - -defaults: - - split: null - - drug_featurizer: none # ??? - - protein_featurizer: none # ??? - - collator: default - -task: ${task.task} -num_classes: ${task.num_classes} - -data_dir: ${paths.data_dir} -data_file: null -train_val_test_split: null - -batch_size: ??? 
-num_workers: 0 -pin_memory: false - -#train: ${train} \ No newline at end of file diff --git a/configs/data/protein_featurizer/fcs.yaml b/configs/data/protein_featurizer/fcs.yaml deleted file mode 100644 index cbf1f380d2bd951a321971676cb20f7de5e17e7c..0000000000000000000000000000000000000000 --- a/configs/data/protein_featurizer/fcs.yaml +++ /dev/null @@ -1,4 +0,0 @@ -_target_: deepscreen.data.featurizers.fcs.protein_to_embedding -_partial_: true - -max_sequence_length: 545 \ No newline at end of file diff --git a/configs/data/protein_featurizer/label.yaml b/configs/data/protein_featurizer/label.yaml deleted file mode 100644 index 1824297352e8d0706126486f1a00585e78c4f892..0000000000000000000000000000000000000000 --- a/configs/data/protein_featurizer/label.yaml +++ /dev/null @@ -1,12 +0,0 @@ -#_target_: deepscreen.data.featurizers.categorical.fasta_to_label -#_partial_: true -# -#max_sequence_length: 1000 -##in_channels: 26 - -_target_: deepscreen.data.featurizers.categorical.sequence_to_label -_partial_: true -charset: ['A', 'C', 'B', 'E', 'D', 'G', 'F', 'I', 'H', 'K', 'M', 'L', 'O', - 'N', 'Q', 'P', 'S', 'R', 'U', 'T', 'W', 'V', 'Y', 'X', 'Z'] - -max_sequence_length: 1000 \ No newline at end of file diff --git a/configs/data/protein_featurizer/none.yaml b/configs/data/protein_featurizer/none.yaml deleted file mode 100644 index 29899b9fc6b0164c149e17b31e350be9135add49..0000000000000000000000000000000000000000 --- a/configs/data/protein_featurizer/none.yaml +++ /dev/null @@ -1,2 +0,0 @@ -_target_: deepscreen.utils.passthrough -_partial_: true \ No newline at end of file diff --git a/configs/data/protein_featurizer/onehot.yaml b/configs/data/protein_featurizer/onehot.yaml deleted file mode 100644 index d44e8a5fcc0ac0cfd47e17fda8f302e73bcc359d..0000000000000000000000000000000000000000 --- a/configs/data/protein_featurizer/onehot.yaml +++ /dev/null @@ -1,12 +0,0 @@ -#_target_: deepscreen.data.featurizers.categorical.fasta_to_onehot -#_partial_: true -# -#max_sequence_length: 1000 -##in_channels: 26 - -_target_: deepscreen.data.featurizers.categorical.sequence_to_onehot -_partial_: true -charset: ['A', 'C', 'B', 'E', 'D', 'G', 'F', 'I', 'H', 'K', 'M', 'L', 'O', - 'N', 'Q', 'P', 'S', 'R', 'U', 'T', 'W', 'V', 'Y', 'X', 'Z'] - -max_sequence_length: 1000 \ No newline at end of file diff --git a/configs/data/protein_featurizer/tokenizer.yaml b/configs/data/protein_featurizer/tokenizer.yaml deleted file mode 100644 index 9e264020782f84719e5370dd3cfc10f44a7a236c..0000000000000000000000000000000000000000 --- a/configs/data/protein_featurizer/tokenizer.yaml +++ /dev/null @@ -1,6 +0,0 @@ -_target_: deepscreen.data.featurizers.token.sequence_to_token_ids -_partial_: true - -tokenizer: - _target_: tape.TAPETokenizer.from_pretrained - vocab: iupac diff --git a/configs/data/protein_featurizer/word2vec.yaml b/configs/data/protein_featurizer/word2vec.yaml deleted file mode 100644 index 0d9f4761a374a05ce0f5b20cd80073632f0c030f..0000000000000000000000000000000000000000 --- a/configs/data/protein_featurizer/word2vec.yaml +++ /dev/null @@ -1,6 +0,0 @@ -_target_: deepscreen.data.featurizers.word.protein_to_word_embedding -_partial_: true - -model: - _target_: gensim.models.Word2Vec.load - fname: ${paths.resource_dir}/models/word2vec_30.model \ No newline at end of file diff --git a/configs/data/split/cold_drug.yaml b/configs/data/split/cold_drug.yaml deleted file mode 100644 index 3c0416a876682bb9759822d493e70a21ac0f28f6..0000000000000000000000000000000000000000 --- a/configs/data/split/cold_drug.yaml +++ 
/dev/null @@ -1,4 +0,0 @@ -_target_: deepscreen.data.utils.split.cold_start -_partial_: true - -entity: drug \ No newline at end of file diff --git a/configs/data/split/cold_protein.yaml b/configs/data/split/cold_protein.yaml deleted file mode 100644 index 7a3694841cc5cb00549d3563791a872781d0d775..0000000000000000000000000000000000000000 --- a/configs/data/split/cold_protein.yaml +++ /dev/null @@ -1,4 +0,0 @@ -_target_: deepscreen.data.utils.split.cold_start -_partial_: true - -entity: protein \ No newline at end of file diff --git a/configs/data/split/none.yaml b/configs/data/split/none.yaml deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/configs/data/split/random.yaml b/configs/data/split/random.yaml deleted file mode 100644 index e8c00c9b5b38f57fda3b03185a1e91dbefedff8c..0000000000000000000000000000000000000000 --- a/configs/data/split/random.yaml +++ /dev/null @@ -1,10 +0,0 @@ -#_target_: torch.utils.data.random_split -#_partial_: true - -#generator: -# _target_: torch.Generator # will use global seed set by lightning.seed_everything or torch.manual_seed automatically - -_target_: deepscreen.data.utils.split.random_split -_partial_: true - -seed: ${seed} diff --git a/configs/data/transform/minmax.yaml b/configs/data/transform/minmax.yaml deleted file mode 100644 index 3c4e68561d5768d6b1677900484c386ce3b8df7c..0000000000000000000000000000000000000000 --- a/configs/data/transform/minmax.yaml +++ /dev/null @@ -1,5 +0,0 @@ -_target_: deepscreen.data.utils.transform -_partial_: true - -scaler: - _target_: sklearn.preprocessing.MinMaxScaler diff --git a/configs/data/transform/none.yaml b/configs/data/transform/none.yaml deleted file mode 100644 index 29899b9fc6b0164c149e17b31e350be9135add49..0000000000000000000000000000000000000000 --- a/configs/data/transform/none.yaml +++ /dev/null @@ -1,2 +0,0 @@ -_target_: deepscreen.utils.passthrough -_partial_: true \ No newline at end of file diff --git a/configs/debug/advanced.yaml b/configs/debug/advanced.yaml deleted file mode 100644 index 2c249cc45699de1d1d8f25aa7c873b127e3df6f9..0000000000000000000000000000000000000000 --- a/configs/debug/advanced.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# @package _global_ - -# advanced debug mode that enables callbacks, loggers and gpu during debugging -job_name: "debug" - -extras: - ignore_warnings: False - enforce_tags: False - -hydra: - job_logging: - root: - level: DEBUG - verbose: True - -trainer: - max_epochs: 1 - accelerator: gpu - devices: 1 - detect_anomaly: true - deterministic: false - -data: - num_workers: 0 - pin_memory: False diff --git a/configs/debug/default.yaml b/configs/debug/default.yaml deleted file mode 100644 index 84e7dc1e80b355c8ab075a28e65d8fae08127eea..0000000000000000000000000000000000000000 --- a/configs/debug/default.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# @package _global_ - -# default debugging setup, runs 1 full epoch -# other debugging configs can inherit from this one - -# overwrite job name so debugging logs are stored in separate folder -job_name: "debug" - -# disable callbacks and loggers during debugging -callbacks: null -logger: null - -extras: - ignore_warnings: False - enforce_tags: False - -# sets level of all command line loggers to 'DEBUG' -# https://hydra.cc/docs/tutorials/basic/running_your_app/logging/ -hydra: - job_logging: - root: - level: DEBUG - # use this to also set hydra loggers to 'DEBUG' - verbose: True - -trainer: - max_epochs: 1 - accelerator: cpu # debuggers don't like gpus - 
devices: 1 # debuggers don't like multiprocessing - detect_anomaly: true # raise exception if NaN or +/-inf is detected in any tensor - deterministic: false - -data: - num_workers: 0 # debuggers don't like multiprocessing - pin_memory: False # disable gpu memory pin diff --git a/configs/debug/fdr.yaml b/configs/debug/fdr.yaml deleted file mode 100644 index 111e2f35924bf77db6706867a5a381cb90e2e855..0000000000000000000000000000000000000000 --- a/configs/debug/fdr.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# @package _global_ - -# runs 1 train, 1 validation and 1 test step - -defaults: - - default - -trainer: - accelerator: gpu - fast_dev_run: true - detect_anomaly: true diff --git a/configs/debug/fdr_advanced.yaml b/configs/debug/fdr_advanced.yaml deleted file mode 100644 index 570718adfc2148760067ac799cdc3f4ae38f6c78..0000000000000000000000000000000000000000 --- a/configs/debug/fdr_advanced.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# @package _global_ - -# runs 1 train, 1 validation and 1 test step - -defaults: - - advanced - -trainer: - accelerator: gpu - fast_dev_run: true - detect_anomaly: true \ No newline at end of file diff --git a/configs/debug/limit.yaml b/configs/debug/limit.yaml deleted file mode 100644 index 514d77fbd1475b03fff0372e3da3c2fa7ea7d190..0000000000000000000000000000000000000000 --- a/configs/debug/limit.yaml +++ /dev/null @@ -1,12 +0,0 @@ -# @package _global_ - -# uses only 1% of the training data and 5% of validation/test data - -defaults: - - default - -trainer: - max_epochs: 3 - limit_train_batches: 0.01 - limit_val_batches: 0.05 - limit_test_batches: 0.05 diff --git a/configs/debug/overfit.yaml b/configs/debug/overfit.yaml deleted file mode 100644 index 9906586a67a12aa81ff69138f589a366dbe2222f..0000000000000000000000000000000000000000 --- a/configs/debug/overfit.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# @package _global_ - -# overfits to 3 batches - -defaults: - - default - -trainer: - max_epochs: 20 - overfit_batches: 3 - -# model ckpt and early stopping need to be disabled during overfitting -callbacks: null diff --git a/configs/debug/profiler.yaml b/configs/debug/profiler.yaml deleted file mode 100644 index 2bd7da87ae23ed425ace99b09250a76a5634a3fb..0000000000000000000000000000000000000000 --- a/configs/debug/profiler.yaml +++ /dev/null @@ -1,12 +0,0 @@ -# @package _global_ - -# runs with execution time profiling - -defaults: - - default - -trainer: - max_epochs: 1 - profiler: "simple" - # profiler: "advanced" - # profiler: "pytorch" diff --git a/configs/experiment/bindingdb.yaml b/configs/experiment/bindingdb.yaml deleted file mode 100644 index e3e170d30a9bbdd3d0252b43979b2861aeb39ce1..0000000000000000000000000000000000000000 --- a/configs/experiment/bindingdb.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# @package _global_ -defaults: - - dti_experiment - - override /task: binary - -data: - train_val_test_split: [dti_benchmark/random_split_update/bindingdb_train.csv, - dti_benchmark/random_split_update/bindingdb_valid.csv, - dti_benchmark/random_split_update/bindingdb_test.csv] diff --git a/configs/experiment/chembl_random.yaml b/configs/experiment/chembl_random.yaml deleted file mode 100644 index 01b9e8aca42b0925a22303d9f2c3f8c4f5998340..0000000000000000000000000000000000000000 --- a/configs/experiment/chembl_random.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# @package _global_ -defaults: - - dti_experiment - - override /task: binary - -data: - train_val_test_split: [chembl_random_global_balance_1_train.csv, - chembl_random_global_balance_1_valid.csv, - 
chembl_random_global_balance_1_test.csv] diff --git a/configs/experiment/chembl_rmfh_random.yaml b/configs/experiment/chembl_rmfh_random.yaml deleted file mode 100644 index a15e8ebfac7db334dc82d22adae2181c1473d9c8..0000000000000000000000000000000000000000 --- a/configs/experiment/chembl_rmfh_random.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# @package _global_ -defaults: - - dti_experiment - - override /task: binary - -data: - train_val_test_split: [chembl_rmFH_random_global_balance_1_train.csv, - chembl_rmFH_random_global_balance_1_valid.csv, - chembl_rmFH_random_global_balance_1_test.csv] \ No newline at end of file diff --git a/configs/experiment/davis.yaml b/configs/experiment/davis.yaml deleted file mode 100644 index 95c628e21ba17d808363a8f89f36ad0513e6a1de..0000000000000000000000000000000000000000 --- a/configs/experiment/davis.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# @package _global_ -defaults: - - dti_experiment - - override /task: binary - -data: - train_val_test_split: [dti_benchmark/random_split_update/davis_train.csv, - dti_benchmark/random_split_update/davis_valid.csv, - dti_benchmark/random_split_update/davis_test.csv] diff --git a/configs/experiment/demo_bindingdb.yaml b/configs/experiment/demo_bindingdb.yaml deleted file mode 100644 index 0ccc1bcdfd5a12025a0ff6aa591a6989f3db2982..0000000000000000000000000000000000000000 --- a/configs/experiment/demo_bindingdb.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# @package _global_ -defaults: - - dti_experiment - - override /task: binary - - override /data/split: random - -data: - data_file: demo/binddb_ic50_demo.csv - train_val_test_split: [0.7, 0.1, 0.2] diff --git a/configs/experiment/dti_experiment.yaml b/configs/experiment/dti_experiment.yaml deleted file mode 100644 index df70106572153a3444128f46023e58cdb6001a04..0000000000000000000000000000000000000000 --- a/configs/experiment/dti_experiment.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# @package _global_ -defaults: - - override /data: dti_data - - override /model: dti_model - - override /trainer: gpu - -seed: 12345 - -trainer: - min_epochs: 1 - max_epochs: 500 - precision: 16-mixed - -callbacks: - early_stopping: - patience: 50 - -data: - num_workers: 8 diff --git a/configs/experiment/example.yaml b/configs/experiment/example.yaml deleted file mode 100644 index 03babeb44c0eec70b050e1f92534b6cd8de770d8..0000000000000000000000000000000000000000 --- a/configs/experiment/example.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# @package _global_ - -# to execute this experiment run: -# python train.py experiment=example - -defaults: - - override /data: dti_data - - override /data/drug_featurizer: onehot - - override /data/protein_featurizer: onehot - - override /model: dti_model - - override /model/protein_encoder: cnn - - override /model/drug_encoder: cnn - - override /model/decoder: concat_mlp - - override /callbacks: default - - override /trainer: default - -# all parameters below will be merged with parameters from default configurations set above -# this allows you to overwrite only specified parameters - -tags: ["dti"] - -seed: 12345 - -data: - data_file: davis.csv - batch_size: 64 - -model: - optimizer: - lr: 0.0001 - -trainer: - min_epochs: 1 - max_epochs: 100 - accelerator: gpu \ No newline at end of file diff --git a/configs/experiment/ion_channels.yaml b/configs/experiment/ion_channels.yaml deleted file mode 100644 index 261d7dac49ccb908a63719c01fd04374e4d21019..0000000000000000000000000000000000000000 --- a/configs/experiment/ion_channels.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# @package _global_ -defaults: 
- - dti_experiment - - override /task: binary - -data: - train_val_test_split: [dti_benchmark/ChEMBL33/train/Ion_channels_train_data.csv, - dti_benchmark/ChEMBL33/valid/Ion_channels_valid_data.csv, - dti_benchmark/ChEMBL33/test/Ion_channels_both_unseen_test_data.csv] diff --git a/configs/experiment/kiba.yaml b/configs/experiment/kiba.yaml deleted file mode 100644 index f3a83942f41b75c800bf467690b88b8397955615..0000000000000000000000000000000000000000 --- a/configs/experiment/kiba.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# @package _global_ -defaults: - - dti_experiment - - /task: binary - -data: - train_val_test_split: [dti_benchmark/random_split_update/kiba_train.csv, - dti_benchmark/random_split_update/kiba_valid.csv, - dti_benchmark/random_split_update/kiba_test.csv] diff --git a/configs/experiment/kinase.yaml b/configs/experiment/kinase.yaml deleted file mode 100644 index c7d129aca1c9d93934efbe61a243cf7603d30143..0000000000000000000000000000000000000000 --- a/configs/experiment/kinase.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# @package _global_ -defaults: - - dti_experiment - - override /task: binary - -data: - train_val_test_split: - - dti_benchmark/ChEMBL33/train/kinase_train_data.csv - - null - - null -# dti_benchmark/ChEMBL33/valid/kinase_valid_data.csv, -# dti_benchmark/ChEMBL33/test/kinase_both_unseen_test_data.csv - diff --git a/configs/experiment/membrane_receptors.yaml b/configs/experiment/membrane_receptors.yaml deleted file mode 100644 index a06d357351dba800c885d7d92e51a7a7ccffc474..0000000000000000000000000000000000000000 --- a/configs/experiment/membrane_receptors.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# @package _global_ -defaults: - - dti_experiment - - override /task: binary - -data: - train_val_test_split: - - dti_benchmark/ChEMBL33/train/Membrane_receptor_train_data.csv - - null - - null -# dti_benchmark/ChEMBL33/valid/Membrane_receptor_valid_data.csv, -# dti_benchmark/ChEMBL33/test/Membrane_receptor_drug_repo_test_data.csv - diff --git a/configs/experiment/non_kinase_enzymes.yaml b/configs/experiment/non_kinase_enzymes.yaml deleted file mode 100644 index 314eeb66a368dfcab29313a647e620ee5bcd7cd9..0000000000000000000000000000000000000000 --- a/configs/experiment/non_kinase_enzymes.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# @package _global_ -defaults: - - dti_experiment - - override /task: binary - -data: - train_val_test_split: - - dti_benchmark/ChEMBL33/train/Non_kinase_enzyme_train_data.csv - - null - - null -# dti_benchmark/ChEMBL33/valid/Non_kinase_enzyme_valid_data.csv, -# dti_benchmark/ChEMBL33/test/Non_kinase_enzyme_both_unseen_test_data.csv - diff --git a/configs/experiment/nuclear_receptors.yaml b/configs/experiment/nuclear_receptors.yaml deleted file mode 100644 index 2edf8a4dea610a9a7c2a9b73178f452888bb20c2..0000000000000000000000000000000000000000 --- a/configs/experiment/nuclear_receptors.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# @package _global_ -defaults: - - dti_experiment - - override /task: binary - -data: - train_val_test_split: [dti_benchmark/ChEMBL33/train/Nuclear_receptors_train_data.csv, - dti_benchmark/ChEMBL33/valid/Nuclear_receptors_valid_data.csv, - dti_benchmark/ChEMBL33/test/Nuclear_receptors_both_unseen_test_data.csv] diff --git a/configs/experiment/other_protein_targets.yaml b/configs/experiment/other_protein_targets.yaml deleted file mode 100644 index ffe3f295621ee3ffc160521e88e2bdf547719190..0000000000000000000000000000000000000000 --- a/configs/experiment/other_protein_targets.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# @package _global_ -defaults: - - 
dti_experiment - - override /task: binary - -data: - train_val_test_split: [dti_benchmark/ChEMBL33/train/Other_protein_targets_train_data.csv, - dti_benchmark/ChEMBL33/valid/Other_protein_targets_valid_data.csv, - dti_benchmark/ChEMBL33/test/Other_protein_targets_both_unseen_test_data.csv] diff --git a/configs/extras/default.yaml b/configs/extras/default.yaml deleted file mode 100644 index b9c6b622283a647fbc513166fc14f016cc3ed8a0..0000000000000000000000000000000000000000 --- a/configs/extras/default.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# disable python warnings if they annoy you -ignore_warnings: False - -# ask user for tags if none are provided in the config -enforce_tags: True - -# pretty print config tree at the start of the run using Rich library -print_config: True diff --git a/configs/hydra/callbacks/csv_experiment_summary.yaml b/configs/hydra/callbacks/csv_experiment_summary.yaml deleted file mode 100644 index 1b0d00b4f7c73dfdc02846e428fc6793133cc549..0000000000000000000000000000000000000000 --- a/configs/hydra/callbacks/csv_experiment_summary.yaml +++ /dev/null @@ -1,3 +0,0 @@ -csv_experiment_summary: - _target_: deepscreen.utils.hydra.CSVExperimentSummary - prefix: ['test/', 'epoch'] \ No newline at end of file diff --git a/configs/hydra/callbacks/default.yaml b/configs/hydra/callbacks/default.yaml deleted file mode 100644 index 3e7617d233fb7f325cdd03caf82bcc0e2ba6bf6d..0000000000000000000000000000000000000000 --- a/configs/hydra/callbacks/default.yaml +++ /dev/null @@ -1,2 +0,0 @@ -defaults: - - csv_experiment_summary diff --git a/configs/hydra/default.yaml b/configs/hydra/default.yaml deleted file mode 100644 index 8d76171f4585f3c26e6c249ba9d4e1617994b421..0000000000000000000000000000000000000000 --- a/configs/hydra/default.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# https://hydra.cc/docs/configure_hydra/intro/ - -# enable color logging -defaults: - - override callbacks: default - - override hydra_logging: colorlog - - override job_logging: colorlog - -# output directory, generated dynamically on each run -run: - dir: ${paths.log_dir}/${job_name}/runs/${now:%Y-%m-%d}_${now:%H-%M-%S-%f}_[${eval:'",".join(${tags})'}] -sweep: - dir: ${paths.log_dir}/${job_name}/multiruns/${now:%Y-%m-%d}_${now:%H-%M-%S-%f}_[${eval:'",".join(${tags})'}] - # Sanitize override_dirname by replacing unsafe characters to avoid unintended subdirectory creation - subdir: ${sanitize_path:'${hydra:job.id}-${hydra:job.override_dirname}'} - -job_logging: - handlers: - file: - filename: ${hydra:runtime.output_dir}/${hydra.job.name}.log - -job: - config: - override_dirname: - kv_sep: '=' - item_sep: ';' - exclude_keys: ['tags', 'sweep', 'data.data_file', 'data.train_val_test_split', 'ckpt_path', 'trainer'] diff --git a/configs/hydra/launcher/submitit_local_example.yaml b/configs/hydra/launcher/submitit_local_example.yaml deleted file mode 100644 index 2f47752804b52ea1216d750ac3229d12e463aaff..0000000000000000000000000000000000000000 --- a/configs/hydra/launcher/submitit_local_example.yaml +++ /dev/null @@ -1,12 +0,0 @@ -# @package _global_ -defaults: - - submitit_local - -submitit_folder: ${hydra.sweep.dir}/.submitit/%j -timeout_min: 60 -cpus_per_task: 1 -gpus_per_node: 1 -tasks_per_node: 8 -mem_gb: 16 -nodes: 1 -name: ${hydra.job.name} diff --git a/configs/hydra/launcher/submitit_slurm_example.yaml b/configs/hydra/launcher/submitit_slurm_example.yaml deleted file mode 100644 index 1dec61801ed47679ea6598ef2540965e110249c4..0000000000000000000000000000000000000000 --- a/configs/hydra/launcher/submitit_slurm_example.yaml 
+++ /dev/null @@ -1,31 +0,0 @@ -# @package _global_ -defaults: - - submitit_slurm - -hydra: - mode: "MULTIRUN" - launcher: - submitit_folder: ${hydra.sweep.dir}/.submitit/%j - timeout_min: null - cpus_per_task: null - gpus_per_node: null - tasks_per_node: 1 - mem_gb: null - nodes: 1 - name: ${hydra.job.name} - partition: null - qos: null - comment: null - constraint: null - exclude: null - gres: null - cpus_per_gpu: null - gpus_per_task: null - mem_per_gpu: null - mem_per_cpu: null - account: null - signal_delay_s: 120 - max_num_timeout: 0 - additional_parameters: {} - array_parallelism: 256 - setup: null diff --git a/configs/hydra/sweeper/optuna_hps.yaml b/configs/hydra/sweeper/optuna_hps.yaml deleted file mode 100644 index 316cf24ab252557a08a5e82fa8c9c462b9835246..0000000000000000000000000000000000000000 --- a/configs/hydra/sweeper/optuna_hps.yaml +++ /dev/null @@ -1,44 +0,0 @@ -# @package _global_ - -# example batch experiment of some experiment with Optuna: -# python train.py -m sweep=optuna experiment=example - -defaults: - - optuna - -# choose metric which will be optimized by Optuna -# make sure this is the correct name of some metric logged in lightning module! -objective_metrics: ["val/auroc"] - -# here we define Optuna hyperparameter search -# it optimizes for value returned from function with @hydra.main decorator -# docs: https://hydra.cc/docs/next/plugins/optuna_sweeper -hydra: - mode: "MULTIRUN" # set hydra to multirun by default if this config is attached - sweeper: - # storage URL to persist optimization results - # for example, you can use SQLite if you set 'sqlite:///example.db' - storage: null - - # name of the study to persist optimization results - study_name: null - - # number of parallel workers - n_jobs: 1 - - # 'minimize' or 'maximize' the objective - direction: minimize - - # total number of runs that will be executed - n_trials: 20 - - # choose Optuna hyperparameter sampler - # you can choose bayesian sampler (tpe), random search (without optimization), grid sampler, and others - # docs: https://optuna.readthedocs.io/en/stable/reference/samplers.html - sampler: - _target_: optuna.samplers.TPESampler - seed: 12345 - n_startup_trials: 10 # number of random sampling runs before optimization starts - - # define hyperparameter search space - params: ??? 
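The Optuna sweeper config above deliberately leaves the search space unset (params: ???). For reference, a minimal sketch of how such a search space is typically declared for Hydra's Optuna sweeper follows; the parameter paths point at keys that do exist in these configs (model.optimizer.lr from model/optimizer/adam.yaml, batch_size from data/dti_data.yaml), while the ranges themselves are illustrative assumptions rather than values taken from this commit.

# illustrative only -- example search space for the sweeper config above
hydra:
  sweeper:
    params:
      model.optimizer.lr: interval(0.00001, 0.001)   # continuous range, assumed bounds
      data.batch_size: choice(32, 64, 128)           # discrete choices, assumed values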
diff --git a/configs/local/.gitkeep b/configs/local/.gitkeep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/configs/local/gpu3090.yaml b/configs/local/gpu3090.yaml deleted file mode 100644 index 407f3e2b68c5f268b917310112398cdd607c51e6..0000000000000000000000000000000000000000 --- a/configs/local/gpu3090.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# @package _global_ -defaults: - - override /hydra/launcher: submitit_slurm - -hydra: - launcher: - submitit_folder: ${hydra.sweep.dir}/.submitit - timeout_min: 6000 - cpus_per_task: 8 - gpus_per_task: 1 - gres: gpu:1 - partition: gpu3090 - qos: gpu3090 - additional_parameters: {} - array_parallelism: 256 diff --git a/configs/logger/comet.yaml b/configs/logger/comet.yaml deleted file mode 100644 index ebe6c6f19bd172255e9f29148df10a4d2ec42c84..0000000000000000000000000000000000000000 --- a/configs/logger/comet.yaml +++ /dev/null @@ -1,12 +0,0 @@ -# https://www.comet.ml - -comet: - _target_: lightning.pytorch.loggers.comet.CometLogger - api_key: ${oc.env:COMET_API_TOKEN} # api key is loaded from environment variable - save_dir: "${paths.output_dir}" - project_name: "deepscreen" - rest_api_key: null - # experiment_name: "" - experiment_key: null # set to resume experiment - offline: False - prefix: "" \ No newline at end of file diff --git a/configs/logger/csv.yaml b/configs/logger/csv.yaml deleted file mode 100644 index bf9af6fb5e70b1382a1421397b12ea677dd8926f..0000000000000000000000000000000000000000 --- a/configs/logger/csv.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# csv logger built in lightning - -csv: - _target_: lightning.pytorch.loggers.csv_logs.CSVLogger - save_dir: "${paths.output_dir}" - name: "csv/" - prefix: "" - version: "" diff --git a/configs/logger/default.yaml b/configs/logger/default.yaml deleted file mode 100644 index bfdb2248689fcc8c5827c6c99c0d42bfad37b3c8..0000000000000000000000000000000000000000 --- a/configs/logger/default.yaml +++ /dev/null @@ -1,5 +0,0 @@ -defaults: - - csv -# - mlflow -# - wandb -# - comet \ No newline at end of file diff --git a/configs/logger/mlflow.yaml b/configs/logger/mlflow.yaml deleted file mode 100644 index bed42abc9cd686d89137e236a009c2c9277f2608..0000000000000000000000000000000000000000 --- a/configs/logger/mlflow.yaml +++ /dev/null @@ -1,12 +0,0 @@ -# https://mlflow.org - -mlflow: - _target_: lightning.pytorch.loggers.mlflow.MLFlowLogger - # experiment_name: "" - # run_name: "" - tracking_uri: "file://${paths.output_dir}/mlflow/" # run `mlflow ui` command inside the `logs/mlflow/` dir to open the UI - tags: ${tags} - # save_dir: "./mlruns" - prefix: "" - artifact_location: null - # run_id: "" \ No newline at end of file diff --git a/configs/logger/multiple_loggers.yaml b/configs/logger/multiple_loggers.yaml deleted file mode 100644 index 40d561d1b723cb314de4272210a96c4343dcace9..0000000000000000000000000000000000000000 --- a/configs/logger/multiple_loggers.yaml +++ /dev/null @@ -1,6 +0,0 @@ -# train with multiple loggers at once -defaults: - - csv - - tensorboard -# - mlflow -# - wandb \ No newline at end of file diff --git a/configs/logger/neptune.yaml b/configs/logger/neptune.yaml deleted file mode 100644 index 086e85fc59117f4a8035d0c1773e4c856fd7e995..0000000000000000000000000000000000000000 --- a/configs/logger/neptune.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# https://neptune.ai - -neptune: - _target_: lightning.pytorch.loggers.neptune.NeptuneLogger - api_key: ${oc.env:NEPTUNE_API_TOKEN} # api key is loaded from environment 
variable - project: username/deepscreen - # name: "" - log_model_checkpoints: True - prefix: "" \ No newline at end of file diff --git a/configs/logger/tensorboard.yaml b/configs/logger/tensorboard.yaml deleted file mode 100644 index f438664df015f88bd3cabffaa520da3ef98dc379..0000000000000000000000000000000000000000 --- a/configs/logger/tensorboard.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# https://www.tensorflow.org/tensorboard/ - -tensorboard: - _target_: lightning.pytorch.loggers.tensorboard.TensorBoardLogger - save_dir: "${paths.output_dir}/tensorboard/" - name: null - log_graph: False - default_hp_metric: True - prefix: "" - version: "" diff --git a/configs/logger/wandb.yaml b/configs/logger/wandb.yaml deleted file mode 100644 index 9ec9ba26d7673c20005d8e67e3bed2b647c80218..0000000000000000000000000000000000000000 --- a/configs/logger/wandb.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# https://wandb.ai - -wandb: - _target_: lightning.pytorch.loggers.wandb.WandbLogger - # name: "" # name of the run (normally generated by wandb) - save_dir: "${paths.output_dir}" - offline: True - id: null # pass correct id to resume experiment! - anonymous: null # enable anonymous logging - project: "deepscreen" - log_model: False # upload lightning ckpts - prefix: "" # a string to put at the beginning of metric keys - # entity: "" # set to name of your wandb team - group: "" - tags: ${tags} - job_type: "" \ No newline at end of file diff --git a/configs/model/dti_model.yaml b/configs/model/dti_model.yaml deleted file mode 100644 index 72b1c9ee2b75a2f6ffce9a33b8d42cafd9cc67f0..0000000000000000000000000000000000000000 --- a/configs/model/dti_model.yaml +++ /dev/null @@ -1,12 +0,0 @@ -_target_: deepscreen.models.dti.DTILightningModule - -defaults: - - _self_ - - optimizer: adam - - scheduler: default - - predictor: none - - metrics: dti_metrics - -out: ${task.out} -loss: ${task.loss} -activation: ${task.activation} diff --git a/configs/model/loss/multitask_loss.yaml b/configs/model/loss/multitask_loss.yaml deleted file mode 100644 index 6dfe6353808f3f91a42e151a34a3cae056e0fc7e..0000000000000000000000000000000000000000 --- a/configs/model/loss/multitask_loss.yaml +++ /dev/null @@ -1,7 +0,0 @@ -_target_: deepscreen.models.loss.multitask_loss.MultitaskLoss - -loss_fns: - - _target_: torch.nn.MSELoss - - _target_: torch.nn.CrossEntropyLoss - weight: null -reduction: sum \ No newline at end of file diff --git a/configs/model/metrics/accuracy.yaml b/configs/model/metrics/accuracy.yaml deleted file mode 100644 index 80a9d3f8a6837571b70d0cfb70ba7c6f59ee1c3e..0000000000000000000000000000000000000000 --- a/configs/model/metrics/accuracy.yaml +++ /dev/null @@ -1,4 +0,0 @@ -accuracy: - _target_: torchmetrics.Accuracy - task: ${task.task} - num_classes: ${task.num_classes} \ No newline at end of file diff --git a/configs/model/metrics/auprc.yaml b/configs/model/metrics/auprc.yaml deleted file mode 100644 index b9de03c65fdeec91e8b39f38dd773d9956e2e8ce..0000000000000000000000000000000000000000 --- a/configs/model/metrics/auprc.yaml +++ /dev/null @@ -1,4 +0,0 @@ -auprc: - _target_: torchmetrics.AveragePrecision - task: ${task.task} - num_classes: ${task.num_classes} \ No newline at end of file diff --git a/configs/model/metrics/auroc.yaml b/configs/model/metrics/auroc.yaml deleted file mode 100644 index a4bcdbbd885ae5ba05120e5568ca7d3a323f213f..0000000000000000000000000000000000000000 --- a/configs/model/metrics/auroc.yaml +++ /dev/null @@ -1,4 +0,0 @@ -auroc: - _target_: torchmetrics.AUROC - task: ${task.task} - num_classes: 
${task.num_classes} \ No newline at end of file diff --git a/configs/model/metrics/bedroc.yaml b/configs/model/metrics/bedroc.yaml deleted file mode 100644 index 86c68585882faefde5ee711dfe647d406a06dd09..0000000000000000000000000000000000000000 --- a/configs/model/metrics/bedroc.yaml +++ /dev/null @@ -1,3 +0,0 @@ -bedroc: - _target_: deepscreen.models.metrics.bedroc.BEDROC - alpha: 80.5 \ No newline at end of file diff --git a/configs/model/metrics/concordance_index.yaml b/configs/model/metrics/concordance_index.yaml deleted file mode 100644 index 634ea1906b5d0307e2e0342beec04c28e66af308..0000000000000000000000000000000000000000 --- a/configs/model/metrics/concordance_index.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# FIXME: implement concordance index -_target_: \ No newline at end of file diff --git a/configs/model/metrics/confusion_matrix.yaml b/configs/model/metrics/confusion_matrix.yaml deleted file mode 100644 index d0be32b6b92a93e743d3926bf19fc3849c2e9260..0000000000000000000000000000000000000000 --- a/configs/model/metrics/confusion_matrix.yaml +++ /dev/null @@ -1 +0,0 @@ -_target_: torchmetrics.ConfusionMatrix \ No newline at end of file diff --git a/configs/model/metrics/dta_metrics.yaml b/configs/model/metrics/dta_metrics.yaml deleted file mode 100644 index 2b0f108cb9df18dfcceae6af9fd94789685a54b2..0000000000000000000000000000000000000000 --- a/configs/model/metrics/dta_metrics.yaml +++ /dev/null @@ -1,2 +0,0 @@ -defaults: - - mean_squared_error diff --git a/configs/model/metrics/dti_metrics.yaml b/configs/model/metrics/dti_metrics.yaml deleted file mode 100644 index 93ca0c40a0d77ffb095ab91d52863667bf5746cc..0000000000000000000000000000000000000000 --- a/configs/model/metrics/dti_metrics.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# train with many loggers at once - -defaults: - - auroc - - auprc - - specificity - - sensitivity - - precision - - recall - - f1_score -# Common virtual screening metrics: -# - ef -# - bedroc -# - hit_rate diff --git a/configs/model/metrics/ef.yaml b/configs/model/metrics/ef.yaml deleted file mode 100644 index 82553b414da98c55508db9830d82b12af3db786d..0000000000000000000000000000000000000000 --- a/configs/model/metrics/ef.yaml +++ /dev/null @@ -1,7 +0,0 @@ -ef1: - _target_: deepscreen.models.metrics.ef.EF - alpha: 0.01 - -ef5: - _target_: deepscreen.models.metrics.ef.EF - alpha: 0.05 \ No newline at end of file diff --git a/configs/model/metrics/f1_score.yaml b/configs/model/metrics/f1_score.yaml deleted file mode 100644 index abfb6e4ca37a9dad399aeb3b1244d958d542e238..0000000000000000000000000000000000000000 --- a/configs/model/metrics/f1_score.yaml +++ /dev/null @@ -1,4 +0,0 @@ -f1_score: - _target_: torchmetrics.F1Score - task: ${task.task} - num_classes: ${task.num_classes} \ No newline at end of file diff --git a/configs/model/metrics/hit_rate.yaml b/configs/model/metrics/hit_rate.yaml deleted file mode 100644 index 70976774eb365fdf8b8c1f97bd3ad19d6cb64cb8..0000000000000000000000000000000000000000 --- a/configs/model/metrics/hit_rate.yaml +++ /dev/null @@ -1,3 +0,0 @@ -hit_rate: - _target_: deepscreen.models.metrics.hit_rate.HitRate - alpha: 0.05 diff --git a/configs/model/metrics/mean_squared_error.yaml b/configs/model/metrics/mean_squared_error.yaml deleted file mode 100644 index 0d9a18c60d43210b16878ff479b1cfa3168788cf..0000000000000000000000000000000000000000 --- a/configs/model/metrics/mean_squared_error.yaml +++ /dev/null @@ -1,2 +0,0 @@ -mean_squared_error: - _target_: torchmetrics.MeanSquaredError \ No newline at end of file diff --git 
a/configs/model/metrics/prc.yaml b/configs/model/metrics/prc.yaml deleted file mode 100644 index 75e3ee320d5b9a32a9acdb55d564fbacab975088..0000000000000000000000000000000000000000 --- a/configs/model/metrics/prc.yaml +++ /dev/null @@ -1,4 +0,0 @@ -prc: - _target_: torchmetrics.PrecisionRecallCurve - task: ${task.task} - num_classes: ${task.num_classes} \ No newline at end of file diff --git a/configs/model/metrics/precision.yaml b/configs/model/metrics/precision.yaml deleted file mode 100644 index 4b8212b1999c10b627de5859e034260a39022608..0000000000000000000000000000000000000000 --- a/configs/model/metrics/precision.yaml +++ /dev/null @@ -1,4 +0,0 @@ -precision: - _target_: torchmetrics.Precision - task: ${task.task} - num_classes: ${task.num_classes} diff --git a/configs/model/metrics/recall.yaml b/configs/model/metrics/recall.yaml deleted file mode 100644 index eadad752ad1c4e1ac137580a289d4d32916ca975..0000000000000000000000000000000000000000 --- a/configs/model/metrics/recall.yaml +++ /dev/null @@ -1,4 +0,0 @@ -recall: - _target_: torchmetrics.Recall - task: ${task.task} - num_classes: ${task.num_classes} \ No newline at end of file diff --git a/configs/model/metrics/roc.yaml b/configs/model/metrics/roc.yaml deleted file mode 100644 index 91968a6f42e3d399f39e587785eaddabda523a23..0000000000000000000000000000000000000000 --- a/configs/model/metrics/roc.yaml +++ /dev/null @@ -1,4 +0,0 @@ -roc: - _target_: torchmetrics.ROC - task: ${task.task} - num_classes: ${task.num_classes} \ No newline at end of file diff --git a/configs/model/metrics/sensitivity.yaml b/configs/model/metrics/sensitivity.yaml deleted file mode 100644 index 49568b4512c2b75ebfce98d8ee03e2b1148966cc..0000000000000000000000000000000000000000 --- a/configs/model/metrics/sensitivity.yaml +++ /dev/null @@ -1,4 +0,0 @@ -sensitivity: - _target_: deepscreen.models.metrics.sensitivity.Sensitivity - task: ${task.task} - num_classes: ${task.num_classes} \ No newline at end of file diff --git a/configs/model/metrics/specificity.yaml b/configs/model/metrics/specificity.yaml deleted file mode 100644 index 5b161be947876081ce9b6b070ed7201874f5cbc6..0000000000000000000000000000000000000000 --- a/configs/model/metrics/specificity.yaml +++ /dev/null @@ -1,4 +0,0 @@ -specificity: - _target_: torchmetrics.Specificity - task: ${task.task} - num_classes: ${task.num_classes} \ No newline at end of file diff --git a/configs/model/metrics/test_metrics.yaml b/configs/model/metrics/test_metrics.yaml deleted file mode 100644 index 2ff2b80870d15e9863158e6dfbd17cf3dd9586c2..0000000000000000000000000000000000000000 --- a/configs/model/metrics/test_metrics.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# train with many loggers at once - -defaults: - - auroc - - auprc - - roc - - prc -# Common virtual screening metrics: -# - ef -# - bedroc -# - hit_rate diff --git a/configs/model/optimizer/adam.yaml b/configs/model/optimizer/adam.yaml deleted file mode 100644 index 67ab7f2cfeeb16daef186434fd4655afab56bf37..0000000000000000000000000000000000000000 --- a/configs/model/optimizer/adam.yaml +++ /dev/null @@ -1,5 +0,0 @@ -_target_: torch.optim.Adam -_partial_: true - -lr: 0.0001 -weight_decay: 0.0 \ No newline at end of file diff --git a/configs/model/optimizer/none.yaml b/configs/model/optimizer/none.yaml deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/configs/model/predictor/custom.yaml b/configs/model/predictor/custom.yaml deleted file mode 100644 index 
d7a867b87888c03a7209b4b78bbdf2dadfff07a2..0000000000000000000000000000000000000000 --- a/configs/model/predictor/custom.yaml +++ /dev/null @@ -1,6 +0,0 @@ -_target_: deepscreen.models.predictors.custom.CustomPredictor - -defaults: - - drug_encoder: cnn - - protein_encoder: cnn - - decoder: concat_mlp diff --git a/configs/model/predictor/decoder/concat_mlp.yaml b/configs/model/predictor/decoder/concat_mlp.yaml deleted file mode 100644 index 17b92c9493a45d51b3463dc069a73295fc22eb4f..0000000000000000000000000000000000000000 --- a/configs/model/predictor/decoder/concat_mlp.yaml +++ /dev/null @@ -1,6 +0,0 @@ -_target_: deepscreen.models.components.mlp.ConcatMLP - -input_channels: ${eval:${model.drug_encoder.out_channels}+${model.protein_encoder.out_channels}} -out_channels: 512 -hidden_channels: [1024,1024] -dropout: 0.1 \ No newline at end of file diff --git a/configs/model/predictor/decoder/mlp_deepdta.yaml b/configs/model/predictor/decoder/mlp_deepdta.yaml deleted file mode 100644 index b6ee28076224a373a6879ece9296491dccfd280c..0000000000000000000000000000000000000000 --- a/configs/model/predictor/decoder/mlp_deepdta.yaml +++ /dev/null @@ -1,6 +0,0 @@ -_target_: deepscreen.models.components.mlp.MLP2 - -input_channels: ${eval:${model.drug_encoder.out_channels}+${model.protein_encoder.out_channels}} -out_channels: 1 -hidden_channels: [1024,1024,512] -dropout: 0.1 \ No newline at end of file diff --git a/configs/model/predictor/decoder/mlp_lazy.yaml b/configs/model/predictor/decoder/mlp_lazy.yaml deleted file mode 100644 index 832863817e37fed6e5ef54eba4de99341310c4dc..0000000000000000000000000000000000000000 --- a/configs/model/predictor/decoder/mlp_lazy.yaml +++ /dev/null @@ -1,5 +0,0 @@ -_target_: deepscreen.models.components.mlp.LazyMLP - -out_channels: 1 -hidden_channels: [1024,1024,512] -dropout: 0.1 \ No newline at end of file diff --git a/configs/model/predictor/deep_dta.yaml b/configs/model/predictor/deep_dta.yaml deleted file mode 100644 index 7f6cfc42a5e850554e0825d803f79575abdcdb1b..0000000000000000000000000000000000000000 --- a/configs/model/predictor/deep_dta.yaml +++ /dev/null @@ -1,15 +0,0 @@ -_target_: deepscreen.models.predictors.deep_dta.DeepDTA - -defaults: - - drug_encoder@drug_cnn: cnn - - protein_encoder@protein_cnn: cnn -# - /model/decoder@fc: concat_mlp - -num_features_drug: 63 -num_features_protein: 26 -embed_dim: 128 - -drug_cnn: - in_channels: ${data.drug_featurizer.max_sequence_length} -protein_cnn: - in_channels: ${data.protein_featurizer.max_sequence_length} \ No newline at end of file diff --git a/configs/model/predictor/drug_encoder/cnn.yaml b/configs/model/predictor/drug_encoder/cnn.yaml deleted file mode 100644 index 453ef99dec6c2a5db821181e72eef24c9faab966..0000000000000000000000000000000000000000 --- a/configs/model/predictor/drug_encoder/cnn.yaml +++ /dev/null @@ -1,9 +0,0 @@ -_target_: deepscreen.models.components.cnn.CNN - -max_sequence_length: ${data.drug_featurizer.max_sequence_length} -filters: [32, 64, 96] -kernels: [4, 6, 8] -in_channels: ${data.drug_featurizer.in_channels} -out_channels: 256 - -# TODO refactor the in_channels argument pipeline to be more reasonable \ No newline at end of file diff --git a/configs/model/predictor/drug_encoder/cnn_deepdta.yaml b/configs/model/predictor/drug_encoder/cnn_deepdta.yaml deleted file mode 100644 index 97bf3a4870e224b2bc7a5eab946d38b57c279d26..0000000000000000000000000000000000000000 --- a/configs/model/predictor/drug_encoder/cnn_deepdta.yaml +++ /dev/null @@ -1,7 +0,0 @@ -_target_: 
deepscreen.models.components.cnn_deepdta.CNN_DeepDTA - -max_sequence_length: ${data.drug_featurizer.max_sequence_length} -filters: [32, 64, 96] -kernels: [4, 6, 8] -in_channels: ${data.drug_featurizer.in_channels} -out_channels: 128 \ No newline at end of file diff --git a/configs/model/predictor/drug_encoder/gat.yaml b/configs/model/predictor/drug_encoder/gat.yaml deleted file mode 100644 index 3b8dddd5ab5fac2b3cefff8191c01d3ab393c8a5..0000000000000000000000000000000000000000 --- a/configs/model/predictor/drug_encoder/gat.yaml +++ /dev/null @@ -1,5 +0,0 @@ -_target_: deepscreen.models.components.gat.GAT - -num_features: 78 -out_channels: 128 -dropout: 0.2 \ No newline at end of file diff --git a/configs/model/predictor/drug_encoder/gcn.yaml b/configs/model/predictor/drug_encoder/gcn.yaml deleted file mode 100644 index 5e2da337b324610297bd76b6783340d03bd681a8..0000000000000000000000000000000000000000 --- a/configs/model/predictor/drug_encoder/gcn.yaml +++ /dev/null @@ -1,5 +0,0 @@ -_target_: deepscreen.models.components.gcn.GCN - -num_features: 78 -out_channels: 128 -dropout: 0.2 \ No newline at end of file diff --git a/configs/model/predictor/drug_encoder/gin.yaml b/configs/model/predictor/drug_encoder/gin.yaml deleted file mode 100644 index caf5820c158ff7956b2440d25f7b5f901936f683..0000000000000000000000000000000000000000 --- a/configs/model/predictor/drug_encoder/gin.yaml +++ /dev/null @@ -1,5 +0,0 @@ -_target_: deepscreen.models.components.gin.GIN - -num_features: 78 -out_channels: 128 -dropout: 0.2 \ No newline at end of file diff --git a/configs/model/predictor/drug_encoder/lstm.yaml b/configs/model/predictor/drug_encoder/lstm.yaml deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/configs/model/predictor/drug_encoder/transformer.yaml b/configs/model/predictor/drug_encoder/transformer.yaml deleted file mode 100644 index 5eee1571675bcb2d2d539dbaa69ea0268c7b3908..0000000000000000000000000000000000000000 --- a/configs/model/predictor/drug_encoder/transformer.yaml +++ /dev/null @@ -1,11 +0,0 @@ -_target_: deepscreen.models.components.transformer - -input_dim: 1024 -emb_size: 128 -max_position_size: 50 -dropout: 0.1 -n_layer: 8 -intermediate_size: 512 -num_attention_heads: 8 -attention_probs_dropout: 0.1 -hidden_dropout: 0.1 \ No newline at end of file diff --git a/configs/model/predictor/drug_vqa.yaml b/configs/model/predictor/drug_vqa.yaml deleted file mode 100644 index 8ee31b6b3e5a62bec7998d3bdb4c4619ecd27e7f..0000000000000000000000000000000000000000 --- a/configs/model/predictor/drug_vqa.yaml +++ /dev/null @@ -1,15 +0,0 @@ -_target_: deepscreen.models.predictors.drug_vqa.DrugVQA - -conv_dim: 1 -lstm_hid_dim: 64 -d_a: 32 -r: 10 -n_chars_smi: 247 -n_chars_seq: 21 -dropout: 0.2 -in_channels: 8 -cnn_channels: 32 -cnn_layers: 4 -emb_dim: 30 -dense_hid: 64 - diff --git a/configs/model/predictor/graph_dta.yaml b/configs/model/predictor/graph_dta.yaml deleted file mode 100644 index 35bfdf8adc5ab69b780e81c828a195e48e8ec1a0..0000000000000000000000000000000000000000 --- a/configs/model/predictor/graph_dta.yaml +++ /dev/null @@ -1,16 +0,0 @@ -defaults: - - drug_encoder@gnn: gat - - _self_ - -_target_: deepscreen.models.predictors.graph_dta.GraphDTA - -gnn: - num_features: 34 - out_channels: 128 - dropout: 0.2 - -num_features_protein: 26 -n_filters: 32 -embed_dim: 128 -output_dim: 128 -dropout: 0.2 diff --git a/configs/model/predictor/hyper_attention_dti.yaml b/configs/model/predictor/hyper_attention_dti.yaml deleted file 
mode 100644 index 87dde659688073d247953fce5ceef65a38786789..0000000000000000000000000000000000000000 --- a/configs/model/predictor/hyper_attention_dti.yaml +++ /dev/null @@ -1,8 +0,0 @@ -_target_: deepscreen.models.predictors.hyper_attention_dti.HyperAttentionDTI - -protein_kernel: [4,8,12] -drug_kernel: [4,6,8] -conv: 40 -char_dim: 64 -protein_max_len: ${data.protein_featurizer.max_sequence_length} -drug_max_len: ${data.drug_featurizer.max_sequence_length} \ No newline at end of file diff --git a/configs/model/predictor/none.yaml b/configs/model/predictor/none.yaml deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/configs/model/predictor/protein_encoder/cnn.yaml b/configs/model/predictor/protein_encoder/cnn.yaml deleted file mode 100644 index 9363e5e6130a5292d173b90eba8430c12edef3d1..0000000000000000000000000000000000000000 --- a/configs/model/predictor/protein_encoder/cnn.yaml +++ /dev/null @@ -1,7 +0,0 @@ -_target_: deepscreen.models.components.cnn.CNN - -max_sequence_length: ${data.protein_featurizer.max_sequence_length} -filters: [32, 64, 96] -kernels: [4, 8, 12] -in_channels: ${data.protein_featurizer.in_channels} -out_channels: 256 \ No newline at end of file diff --git a/configs/model/predictor/protein_encoder/cnn_deepdta.yaml b/configs/model/predictor/protein_encoder/cnn_deepdta.yaml deleted file mode 100644 index 8ac5f6d9064695cdfe2739946c1e55d55aa588d5..0000000000000000000000000000000000000000 --- a/configs/model/predictor/protein_encoder/cnn_deepdta.yaml +++ /dev/null @@ -1,7 +0,0 @@ -_target_: deepscreen.models.components.cnn_deepdta.CNN_DeepDTA - -max_sequence_length: ${data.protein_featurizer.max_sequence_length} -filters: [32, 64, 96] -kernels: [4, 8, 12] -in_channels: ${data.protein_featurizer.in_channels} -out_channels: 128 \ No newline at end of file diff --git a/configs/model/predictor/protein_encoder/lstm.yaml b/configs/model/predictor/protein_encoder/lstm.yaml deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/configs/model/predictor/protein_encoder/tape_bert.yaml b/configs/model/predictor/protein_encoder/tape_bert.yaml deleted file mode 100644 index 6a64b46c70850a5d796a19615ba553e69cb26a39..0000000000000000000000000000000000000000 --- a/configs/model/predictor/protein_encoder/tape_bert.yaml +++ /dev/null @@ -1,3 +0,0 @@ -_target_: tape.ProteinBertModel.from_pretrained - -pretrained_model_name_or_path: bert-base \ No newline at end of file diff --git a/configs/model/predictor/protein_encoder/transformer.yaml b/configs/model/predictor/protein_encoder/transformer.yaml deleted file mode 100644 index 4fb7c9761bc098da84773f56bc949ce7c7d34c5f..0000000000000000000000000000000000000000 --- a/configs/model/predictor/protein_encoder/transformer.yaml +++ /dev/null @@ -1,12 +0,0 @@ -_target_: deepscreen.models.components.transformer - -input_dim: 8420 -emb_size: 64 -max_position_size: 545 -dropout: 0.1 -n_layer: 2 -intermediate_size: 256 -num_attention_heads: 4 -attention_probs_dropout: 0.1 -hidden_dropout: 0.1 - diff --git a/configs/model/predictor/transformer_cpi.yaml b/configs/model/predictor/transformer_cpi.yaml deleted file mode 100644 index c7d137ba108186e77e67878c4ad6ba40dbad52d3..0000000000000000000000000000000000000000 --- a/configs/model/predictor/transformer_cpi.yaml +++ /dev/null @@ -1,10 +0,0 @@ -_target_: deepscreen.models.predictors.transformer_cpi.TransformerCPI - -protein_dim: 100 -atom_dim: 34 -hid_dim: 64
-n_layers: 3 -n_heads: 8 -pf_dim: 256 -dropout: 0.1 -kernel_size: 7 \ No newline at end of file diff --git a/configs/model/predictor/transformer_cpi_2.yaml b/configs/model/predictor/transformer_cpi_2.yaml deleted file mode 100644 index 2f9439a99c02720650635f2024acb36ef45b18fa..0000000000000000000000000000000000000000 --- a/configs/model/predictor/transformer_cpi_2.yaml +++ /dev/null @@ -1,14 +0,0 @@ -_target_: deepscreen.models.predictors.transformer_cpi_2.TransformerCPI2 - -encoder: - _target_: deepscreen.models.predictors.transformer_cpi_2.Encoder - # /model/protein_encoder@pretrain: tape_bert - n_layers: 3 - pretrain: - _target_: tape.ProteinBertModel.from_pretrained - pretrained_model_name_or_path: bert-base - -decoder: - _target_: deepscreen.models.predictors.transformer_cpi_2.Decoder - n_layers: 3 - dropout: 0.1 diff --git a/configs/model/scheduler/default.yaml b/configs/model/scheduler/default.yaml deleted file mode 100644 index a319ea4099d233cec67de9477bbce25744368a91..0000000000000000000000000000000000000000 --- a/configs/model/scheduler/default.yaml +++ /dev/null @@ -1,11 +0,0 @@ -scheduler: - _target_: torch.optim.lr_scheduler.ReduceLROnPlateau - _partial_: true - - mode: min - factor: 0.1 - patience: 10 - -monitor: ${oc.select:callbacks.model_checkpoint.monitor,"val/loss"} -interval: "epoch" -frequency: 1 \ No newline at end of file diff --git a/configs/model/scheduler/none.yaml b/configs/model/scheduler/none.yaml deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/configs/model/scheduler/reduce_lr_on_plateau.yaml b/configs/model/scheduler/reduce_lr_on_plateau.yaml deleted file mode 100644 index 221efd82d131473c6a51da966d153bb2ec85b69c..0000000000000000000000000000000000000000 --- a/configs/model/scheduler/reduce_lr_on_plateau.yaml +++ /dev/null @@ -1,6 +0,0 @@ -_target_: torch.optim.lr_scheduler.ReduceLROnPlateau -_partial_: true - -mode: min -factor: 0.1 -patience: 10 diff --git a/configs/paths/default.yaml b/configs/paths/default.yaml deleted file mode 100644 index 862261ca7797da591715936cfd88a047c1722da3..0000000000000000000000000000000000000000 --- a/configs/paths/default.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# path to root directory -root_dir: . - -# path to data directory -data_dir: ${paths.root_dir}/data - -# path to log directory -log_dir: ${paths.root_dir}/logs - -# path to resource directory -resource_dir: ${paths.root_dir}/resources - -# path to output directory, created dynamically by hydra -# path generation pattern is specified in `configs/hydra/default.yaml` -# use it to store all files generated during the run, like ckpts and metrics -output_dir: ${hydra:runtime.output_dir} - -# path to working directory -work_dir: ${hydra:runtime.cwd} diff --git a/configs/predict.yaml b/configs/predict.yaml deleted file mode 100644 index 2626280078af1ed4a3160e07f344afdef509f327..0000000000000000000000000000000000000000 --- a/configs/predict.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# @package _global_ - -defaults: - - model: dti_model # fixed for web server version - - task: null - - data: dti_data # fixed for web server version - - callbacks: - - model_summary - - rich_progress_bar - - csv_prediction_writer - - trainer: default - - paths: default - - extras: default - - hydra: default - - _self_ - - preset: null - - experiment: null - - sweep: null - - debug: null - - optional local: default - -job_name: "predict" - -tags: ??? - -# passing checkpoint path is necessary for prediction -ckpt_path: ??? 
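As a minimal illustrative sketch (not part of the diff), assuming a checkout that still contains the configs/ directory referenced above with hydra-core and omegaconf installed, a top-level config such as predict.yaml composes roughly like this; the config_path, checkpoint path and tag values below are placeholders:

from hydra import compose, initialize
from omegaconf import OmegaConf

# Compose the "predict" config tree; the overrides fill the mandatory ??? fields.
with initialize(version_base=None, config_path="configs"):
    cfg = compose(
        config_name="predict",
        overrides=[
            "ckpt_path=logs/example/checkpoints/last.ckpt",  # placeholder checkpoint
            "tags=[demo]",
        ],
    )

# Inspect the composed callbacks without resolving interpolations, since custom
# resolvers such as ${eval:...} are registered elsewhere in the DeepScreen code.
print(OmegaConf.to_yaml(cfg.callbacks, resolve=False))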
diff --git a/configs/preset/bacpi.yaml b/configs/preset/bacpi.yaml deleted file mode 100644 index 9dbea130fd0f6b6fe07f009c340f33f6f8f6c558..0000000000000000000000000000000000000000 --- a/configs/preset/bacpi.yaml +++ /dev/null @@ -1,37 +0,0 @@ -# @package _global_ -model: - predictor: - _target_: deepscreen.models.predictors.bacpi.BACPI - - n_atom: 20480 - n_amino: 8448 - comp_dim: 80 - prot_dim: 80 - latent_dim: 80 - gat_dim: 50 - num_head: 3 - dropout: 0.1 - alpha: 0.1 - window: 5 - layer_cnn: 3 - optimizer: - lr: 5e-4 - -data: - batch_size: 16 - - collator: - automatic_padding: True - - drug_featurizer: - _target_: deepscreen.models.predictors.bacpi.drug_featurizer - _partial_: true - radius: 2 - - protein_featurizer: - _target_: deepscreen.models.predictors.bacpi.split_sequence - _partial_: true - ngram: 3 -# collator: -# _target_: deepscreen.models.predictors.transformer_cpi_2.pack -# _partial_: true diff --git a/configs/preset/coa_dti_pro.yaml b/configs/preset/coa_dti_pro.yaml deleted file mode 100644 index 9ad501a6176a630b8edb97653607ef223946367d..0000000000000000000000000000000000000000 --- a/configs/preset/coa_dti_pro.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# @package _global_ -defaults: - - override /data/protein_featurizer: none - -model: - predictor: - _target_: deepscreen.models.predictors.coa_dti_pro.CoaDTIPro - - n_fingerprint: 20480 - n_word: 26 - dim: 512 - layer_output: 3 - layer_coa: 1 - nhead: 8 - dropout: 0.1 - co_attention: 'inter' - gcn_pooling: False - - esm_model_and_alphabet: - _target_: esm.pretrained.load_model_and_alphabet - model_name: resources/models/esm/esm1_t6_43M_UR50S.pt - -data: - drug_featurizer: - _target_: deepscreen.models.predictors.coa_dti_pro.drug_featurizer - _partial_: true - radius: 2 - batch_size: 1 diff --git a/configs/preset/deep_conv_dti.yaml b/configs/preset/deep_conv_dti.yaml deleted file mode 100644 index 025b151cf1539205f245feb41f9fd60f133c4497..0000000000000000000000000000000000000000 --- a/configs/preset/deep_conv_dti.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# @package _global_ -defaults: - - override /data/drug_featurizer: ecfp - - override /data/protein_featurizer: label - -model: - predictor: - _target_: deepscreen.models.predictors.deep_conv_dti.DeepConvDTI - - activation: - _target_: torch.nn.ELU - - dropout: 0.0 - drug_layers: [512, 128] - protein_windows: [10, 15, 20, 25, 30] - n_filters: 128 - decay: 0.0001 - convolution: true - protein_layers: [128,] - fc_layers: [128,] - -data: - batch_size: 512 diff --git a/configs/preset/deep_dta.yaml b/configs/preset/deep_dta.yaml deleted file mode 100644 index f72d5c942226d88e798cb6a2fd6fef080df69053..0000000000000000000000000000000000000000 --- a/configs/preset/deep_dta.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# @package _global_ -defaults: - - override /data/drug_featurizer: label - - override /data/protein_featurizer: label - - override /model/predictor: deep_dta - -data: - batch_size: 512 diff --git a/configs/preset/deep_dtaf.yaml b/configs/preset/deep_dtaf.yaml deleted file mode 100644 index a2f2a8d143d28834b4c94e300c463bc776e23adf..0000000000000000000000000000000000000000 --- a/configs/preset/deep_dtaf.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# @package _global_ -defaults: - - override /data/drug_featurizer: label - - override /data/protein_featurizer: label - - override /model/predictor: deep_dta - -data: - drug_featurizer: - charset: {'Z', 'Y', 'H', '[', 'O', ']', '5', 'M', 'K', '.', '9', 'e', - '(', 'l', 'U', 'V', 'L', 'B', 'y', 'm', 'd', 'h', 'T', 'A', - 'W', 'b', 'i', 'D', 'R', '8', '/', 
's', '#', 'u', '+', '@', - 'n', '%', 'F', 'r', 't', 'I', 'S', '6', 'P', 'G', 'f', ')', - '-', '\\', 'C', 'E', 'o', '3', '2', '1', '=', 'g', 'c', 'N', - '7', '4', 'a', '0'] - batch_size: 512 - -model: - predictor: - smi_charset_len: ${eval:'len(${data.protein_featurizer.charset})+1'} diff --git a/configs/preset/drug_ban.yaml b/configs/preset/drug_ban.yaml deleted file mode 100644 index 537c9d79276383542e956ff4ef27178c4c358fc4..0000000000000000000000000000000000000000 --- a/configs/preset/drug_ban.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# @package _global_ -defaults: - - override /data/protein_featurizer: label - -model: - predictor: - _target_: deepscreen.models.predictors.drug_ban.DrugBAN - - drug_in_feats: 75 - drug_embedding: 128 - drug_hidden_feats: [128, 128, 128] - drug_padding: True - protein_emb_dim: 128 - num_filters: [128, 128, 128] - kernel_size: [3, 6, 9] - protein_padding: True - mlp_in_dim: 256 - mlp_hidden_dim: 512 - mlp_out_dim: 128 - ban_heads: 2 - -data: - drug_featurizer: - _target_: deepscreen.models.predictors.drug_ban.drug_featurizer - _partial_: true - max_drug_nodes: 330 - - batch_size: 512 diff --git a/configs/preset/drug_vqa.yaml b/configs/preset/drug_vqa.yaml deleted file mode 100644 index 26361c45c25c936a1d9c9b28942343ba7a96a127..0000000000000000000000000000000000000000 --- a/configs/preset/drug_vqa.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# @package _global_ -defaults: - - override /data/drug_featurizer: tokenizer - - override /data/protein_featurizer: label - - override /model/predictor: drug_vqa - -model: - loss: - _target_: deepscreen.models.loss.multitask_loss.MultitaskWeightedLoss - loss_fns: - - ${task.loss} - - _target_: deepscreen.models.predictors.drug_vqa.AttentionL2Regularization - weights: [1, 0.001] - -data: - batch_size: 512 - drug_featurizer: - tokenizer: - _target_: deepscreen.data.featurizers.token.SmilesTokenizer - vocab_file: resources/vocabs/drug_vqa/combinedVoc-wholeFour.voc - regex_pattern: '(\[[^\[\]]{1,10}\])' \ No newline at end of file diff --git a/configs/preset/graph_dta.yaml b/configs/preset/graph_dta.yaml deleted file mode 100644 index 159cb4e619f252663dccda18294600602674e01d..0000000000000000000000000000000000000000 --- a/configs/preset/graph_dta.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# @package _global_ -defaults: - - override /data/drug_featurizer: graph - - override /data/protein_featurizer: label - - override /model/predictor: graph_dta - -data: - batch_size: 512 \ No newline at end of file diff --git a/configs/preset/hyper_attention_dti.yaml b/configs/preset/hyper_attention_dti.yaml deleted file mode 100644 index ae63593517d0ce34310ba3f0dbf1201f66e7e3a5..0000000000000000000000000000000000000000 --- a/configs/preset/hyper_attention_dti.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# @package _global_ -defaults: - - override /data/drug_featurizer: label - - override /data/protein_featurizer: label - - override /model/predictor: hyper_attention_dti - -data: - batch_size: 32 \ No newline at end of file diff --git a/configs/preset/m_graph_dta.yaml b/configs/preset/m_graph_dta.yaml deleted file mode 100644 index 8609b499b94168c7c5da2b5779b310be5c769020..0000000000000000000000000000000000000000 --- a/configs/preset/m_graph_dta.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# @package _global_ -defaults: - - override /data/drug_featurizer: graph - - override /data/protein_featurizer: label - -model: - predictor: - _target_: deepscreen.models.predictors.m_graph_dta.MGraphDTA - block_num: 3 - vocab_protein_size: ${eval:'len(${data.protein_featurizer.charset})+1'} - 
embedding_size: 128 - filter_num: 32 - -data: - drug_featurizer: - atom_features: - _target_: deepscreen.models.predictors.m_graph_dta.atom_features - _partial_: true - batch_size: 512 - -trainer: - precision: 'bf16' \ No newline at end of file diff --git a/configs/preset/mol_trans.yaml b/configs/preset/mol_trans.yaml deleted file mode 100644 index ed7fcf5ce9d43b0e047b9720cb544faf707fb64f..0000000000000000000000000000000000000000 --- a/configs/preset/mol_trans.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# @package _global_ -defaults: - - override /data/drug_featurizer: fcs - - override /data/protein_featurizer: fcs - -data: - batch_size: 16 - drug_featurizer: - max_sequence_length: 205 - protein_featurizer: - max_sequence_length: 545 - -model: - predictor: - _target_: deepscreen.models.predictors.mol_trans.MolTrans - - input_dim_drug: 23532 - input_dim_target: 16693 - max_drug_seq: ${data.drug_featurizer.max_sequence_length} - max_protein_seq: ${data.protein_featurizer.max_sequence_length} - emb_size: 384 - dropout_rate: 0.1 - - # DenseNet - scale_down_ratio: 0.25 - growth_rate: 20 - transition_rate: 0.5 - num_dense_blocks: 4 - kernal_dense_size: 3 - - # Encoder - intermediate_size: 1536 - num_attention_heads: 12 - attention_probs_dropout_prob: 0.1 - hidden_dropout_prob: 0.1 - #flatten_dim: 293412 - - optimizer: - lr: 1e-6 \ No newline at end of file diff --git a/configs/preset/monn.yaml b/configs/preset/monn.yaml deleted file mode 100644 index 117031aaf9f51f3f935c0a679af40883474b7880..0000000000000000000000000000000000000000 --- a/configs/preset/monn.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# @package _global_ -defaults: - - dti_experiment - # TODO MONN featurizers not fully implemented yet - - override /data/drug_featurizer: label - - override /data/protein_featurizer: label - - override /model/predictor: monn - - override /task: binary - - _self_ - -model: - loss: - _target_: deepscreen.models.loss.multitask_loss.MultitaskWeightedLoss - loss_fns: - - _target_: ${model.loss} - - _target_: deepscreen.models.predictors.monn.MaskedBCELoss - weights: [1, 0.1] diff --git a/configs/preset/transformer_cpi_2.yaml b/configs/preset/transformer_cpi_2.yaml deleted file mode 100644 index e1c224f88a6de1266ee4d836c15ff5e88b06f119..0000000000000000000000000000000000000000 --- a/configs/preset/transformer_cpi_2.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# @package _global_ -defaults: - - override /data/drug_featurizer: mol_features - - override /data/protein_featurizer: tokenizer - -model: - predictor: - _target_: deepscreen.models.predictors.transformer_cpi_2.TransformerCPI2 - - encoder: - _target_: deepscreen.models.predictors.transformer_cpi_2.Encoder - # /model/protein_encoder@pretrain: tape_bert - n_layers: 3 - pretrain: - _target_: tape.ProteinBertModel.from_pretrained - pretrained_model_name_or_path: resources/models/tape/bert-base/ # bert-base - - decoder: - _target_: deepscreen.models.predictors.transformer_cpi_2.Decoder - n_layers: 3 - dropout: 0.1 - -data: - batch_size: 16 - collator: - automatic_padding: True - - protein_featurizer: - tokenizer: - _target_: tape.TAPETokenizer.from_pretrained - vocab: iupac - -# collator: -# _target_: deepscreen.models.predictors.transformer_cpi_2.pack -# _partial_: true diff --git a/configs/sweep/chembl33_benchmark.yaml b/configs/sweep/chembl33_benchmark.yaml deleted file mode 100644 index b18bcbb0656411aa393181a5bfd79663d144dc59..0000000000000000000000000000000000000000 --- a/configs/sweep/chembl33_benchmark.yaml +++ /dev/null @@ -1,6 +0,0 @@ -# @package _global_ -tags: 
['sweep', 'benchmark'] -hydra: - sweeper: - params: - experiment: kinase,membrane_receptors,non_kinase_enzymes #,ion_channels,nuclear_receptors,other_protein_targets \ No newline at end of file diff --git a/configs/sweep/ddp_multirun.yaml b/configs/sweep/ddp_multirun.yaml deleted file mode 100644 index fbf6641a20019ae41cf8d231d01433b2562a9c77..0000000000000000000000000000000000000000 --- a/configs/sweep/ddp_multirun.yaml +++ /dev/null @@ -1,6 +0,0 @@ -# @package _global_ -hydra: - sweeper: - params: - preset: graph_dta,deep_dta # drug_vqa,mol_trans,hyper_attention_dti,transformer_cpi_2 - experiment: kiba,davis,bindingdb \ No newline at end of file diff --git a/configs/sweep/dti_benchmark.yaml b/configs/sweep/dti_benchmark.yaml deleted file mode 100644 index b404a86d52131cbde1d7b877be92a80d8000d5b8..0000000000000000000000000000000000000000 --- a/configs/sweep/dti_benchmark.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# @package _global_ -tags: ['sweep', 'benchmark'] -hydra: - sweeper: - params: - preset: transformer_cpi_2 #graph_dta,deep_dta #,mol_trans,hyper_attention_dti,m_graph_dta - experiment: other_protein_targets - diff --git a/configs/sweep/example_multirun.yaml b/configs/sweep/example_multirun.yaml deleted file mode 100644 index b2d7edad480a88165ca51475c7fd076110d2fef0..0000000000000000000000000000000000000000 --- a/configs/sweep/example_multirun.yaml +++ /dev/null @@ -1,6 +0,0 @@ -# @package _global_ -hydra: - sweeper: - params: - preset: graph_dta,deep_dta - experiment: chembl_random diff --git a/configs/sweep/example_multirun_test.yaml b/configs/sweep/example_multirun_test.yaml deleted file mode 100644 index a609000341c88314547dfe300ec59e059c0d9825..0000000000000000000000000000000000000000 --- a/configs/sweep/example_multirun_test.yaml +++ /dev/null @@ -1,7 +0,0 @@ -# @package _global_ -hydra: - sweeper: - params: -# ckpt_path: "'C:/Users/libok/Documents/GitHub/DeepScreen/logs/train/runs/2023-11-07_23-21-55-442205_[debug]/checkpoints/epoch_000.ckpt','C:/Users/libok/Documents/GitHub/DeepScreen/logs/train/runs/2023-11-07_19-46-08-740035_[debug]/checkpoints/epoch_000.ckpt'" - data.data_file: dti_benchmark/random_split_update/davis_reserved_test.csv,dti_benchmark/random_split_update/kiba_reserved_test.csv - ckpt_path: "'C:/Users/libok/Documents/GitHub/DeepScreen/logs/test/multiruns/2023-11-10_10-31-15-339335_[multirun,test,dev]/metrics_summary.csv'" \ No newline at end of file diff --git a/configs/sweep/example_submitit.yaml b/configs/sweep/example_submitit.yaml deleted file mode 100644 index 510522a9cb855b9afd2ac3ebd6a0f50baea93459..0000000000000000000000000000000000000000 --- a/configs/sweep/example_submitit.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# @package _global_ -defaults: - - basic - - override /hydra/launcher: submitit_local - -hydra: - sweeper: - params: - preset: graph_dta,deep_dta - experiment: chembl_random - launcher: - timeout_min: 60 - cpus_per_task: 1 - gpus_per_node: 1 - tasks_per_node: 4 - mem_gb: 32 - nodes: 1 \ No newline at end of file diff --git a/configs/sweep/experiment_summary.csv b/configs/sweep/experiment_summary.csv deleted file mode 100644 index 73e924007e4c3936dadc918f7cece50574336c3e..0000000000000000000000000000000000000000 --- a/configs/sweep/experiment_summary.csv +++ /dev/null @@ -1,21 +0,0 @@ -test/loss,test/auprc,test/auroc,test/f1_score,test/precision,test/recall,test/sensitivity,test/specificity,ckpt_path,job_status,preset,experiment,sweep,tags,model.optimizer.lr,local,data.batch_size 
-0.1525115370750427,0.735183835029602,0.9363504648208618,0.661261260509491,0.7816826701164246,0.5729898810386658,0.5729898810386658,0.984080135822296,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-16_14-53-13-645337_[benchmark,ddta,gdta]/experiment=kiba;preset=deep_dta;sweep=slurm_test;tags=[benchmark,ddta,gdta]/checkpoints/epoch_041.ckpt",COMPLETED,deep_dta,kiba,slurm_test,"[benchmark,ddta,gdta]",,, -0.193635880947113,0.4078962802886963,0.8581838607788086,0.3916666805744171,0.5371428728103638,0.3081967234611511,0.3081967234611511,0.9788622260093688,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-16_14-53-13-645337_[benchmark,ddta,gdta]/experiment=davis;preset=deep_dta;sweep=slurm_test;tags=[benchmark,ddta,gdta]/checkpoints/epoch_008.ckpt",COMPLETED,deep_dta,davis,slurm_test,"[benchmark,ddta,gdta]",,, -0.5104637742042542,0.7619075775146484,0.8163812756538391,0.6808292269706726,0.6954700350761414,0.6667922139167786,0.6667922139167786,0.789406955242157,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-16_14-53-13-645337_[benchmark,ddta,gdta]/experiment=bindingdb;preset=deep_dta;sweep=slurm_test;tags=[benchmark,ddta,gdta]/checkpoints/epoch_003.ckpt",COMPLETED,deep_dta,bindingdb,slurm_test,"[benchmark,ddta,gdta]",,, -0.2221491634845733,0.3560383319854736,0.78690105676651,0.2659846544265747,0.604651153087616,0.1704917997121811,0.1704917997121811,0.9911273717880248,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-16_14-53-13-645337_[benchmark,ddta,gdta]/experiment=davis;preset=graph_dta;sweep=slurm_test;tags=[benchmark,ddta,gdta]/checkpoints/epoch_054.ckpt",COMPLETED,graph_dta,davis,slurm_test,"[benchmark,ddta,gdta]",,, -0.2281630784273147,0.4483628869056701,0.8383051156997681,0.2730720639228821,0.7176079750061035,0.1686182618141174,0.1686182618141174,0.9933990836143494,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-16_14-53-13-645337_[benchmark,ddta,gdta]/experiment=kiba;preset=graph_dta;sweep=slurm_test;tags=[benchmark,ddta,gdta]/checkpoints/epoch_065.ckpt",COMPLETED,graph_dta,kiba,slurm_test,"[benchmark,ddta,gdta]",,, -0.4569543302059173,0.828099250793457,0.8623664379119873,0.7299983501434326,0.7756586670875549,0.6894148588180542,0.6894148588180542,0.8561919927597046,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-16_14-53-13-645337_[benchmark,ddta,gdta]/experiment=bindingdb;preset=graph_dta;sweep=slurm_test;tags=[benchmark,ddta,gdta]/checkpoints/epoch_034.ckpt",COMPLETED,graph_dta,bindingdb,slurm_test,"[benchmark,ddta,gdta]",,, -0.1881016194820404,0.5213174819946289,0.8876519203186035,0.5207100510597229,0.6534653306007385,0.4327868819236755,0.4327868819236755,0.9817327857017516,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-16_18-08-40-028557_[sweep,benchmark]/experiment=davis;preset=hyper_attention_dti;sweep=slurm_test/checkpoints/epoch_017.ckpt",COMPLETED,hyper_attention_dti,davis,slurm_test,,,, -0.144176036119461,0.7704265117645264,0.9501243829727172,0.6897732019424438,0.7632575631141663,0.6291959285736084,0.6291959285736084,0.980585515499115,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-16_18-08-40-028557_[sweep,benchmark]/experiment=kiba;preset=hyper_attention_dti;sweep=slurm_test/checkpoints/epoch_014.ckpt",COMPLETED,hyper_attention_dti,kiba,slurm_test,,,, 
-0.3445079922676086,0.915877878665924,0.934882640838623,0.8371888399124146,0.8385065793991089,0.8358752131462097,0.8358752131462097,0.8838841319084167,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-16_18-08-40-028557_[sweep,benchmark]/experiment=bindingdb;preset=hyper_attention_dti;sweep=slurm_test/checkpoints/epoch_017.ckpt",COMPLETED,hyper_attention_dti,bindingdb,slurm_test,,,, -0.3915603458881378,0.3016493022441864,0.7910888195037842,0.085626907646656,0.6363636255264282,0.0459016375243663,0.0459016375243663,0.9979122877120972,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-20_18-50-02-680654_[mol_trans,benchmark]/experiment=davis;preset=mol_trans;sweep=slurm_test/checkpoints/epoch_000.ckpt",COMPLETED,mol_trans,davis,slurm_test,,,, -0.1662539541721344,0.7397991418838501,0.9369943141937256,0.6784178614616394,0.7550238966941833,0.6159250736236572,0.6159250736236572,0.9801195859909058,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-20_18-50-02-680654_[mol_trans,benchmark]/experiment=kiba;preset=mol_trans;sweep=slurm_test/checkpoints/epoch_008.ckpt",COMPLETED,mol_trans,kiba,slurm_test,,,, -0.2160931378602981,0.3230183720588684,0.796424388885498,0.3271028101444244,0.5691056847572327,0.2295081913471222,0.2295081913471222,0.9861690998077391,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-20_20-20-11-193061_[tcpi2,kiba,davis]/experiment=davis;model.optimizer.lr=1e-06;preset=transformer_cpi_2;sweep=slurm_test;tags=[tcpi2,kiba,davis]/checkpoints/epoch_023.ckpt",COMPLETED,transformer_cpi_2,davis,slurm_test,"[tcpi2,kiba,davis]",1e-06,, -0.2221459150314331,0.5159928798675537,0.8539965152740479,0.4716618657112121,0.612983763217926,0.3832943141460418,0.3832943141460418,0.9759260416030884,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-20_20-20-11-193061_[tcpi2,kiba,davis]/experiment=kiba;model.optimizer.lr=1e-06;preset=transformer_cpi_2;sweep=slurm_test;tags=[tcpi2,kiba,davis]/checkpoints/epoch_077.ckpt",COMPLETED,transformer_cpi_2,kiba,slurm_test,"[tcpi2,kiba,davis]",1e-06,, -0.1689525544643402,0.5410239696502686,0.9028607606887816,0.4678111672401428,0.6770186424255371,0.3573770523071289,0.3573770523071289,0.986430048942566,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-20_21-25-54-050914_[deep_conv_dti,benchmark]/experiment=davis;local=gpu3090;preset=deep_conv_dti;tags=[deep_conv_dti,benchmark]/checkpoints/epoch_012.ckpt",COMPLETED,deep_conv_dti,davis,,"[deep_conv_dti,benchmark]",,gpu3090, -0.1273866593837738,0.8186066746711731,0.95548677444458,0.7409326434135437,0.8289855122566223,0.6697892546653748,0.6697892546653748,0.9862545728683472,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-20_21-25-54-050914_[deep_conv_dti,benchmark]/experiment=kiba;local=gpu3090;preset=deep_conv_dti;tags=[deep_conv_dti,benchmark]/checkpoints/epoch_026.ckpt",COMPLETED,deep_conv_dti,kiba,,"[deep_conv_dti,benchmark]",,gpu3090, -0.3221746683120727,0.9174473285675048,0.9374850988388062,0.8440826535224915,0.8421315550804138,0.846042811870575,0.846042811870575,0.8856043219566345,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-20_21-25-54-050914_[deep_conv_dti,benchmark]/experiment=bindingdb;local=gpu3090;preset=deep_conv_dti;tags=[deep_conv_dti,benchmark]/checkpoints/epoch_008.ckpt",COMPLETED,deep_conv_dti,bindingdb,,"[deep_conv_dti,benchmark]",,gpu3090, 
-0.3808988630771637,0.9017271995544434,0.92254638671875,0.8203105330467224,0.818312406539917,0.822318434715271,0.822318434715271,0.8683114647865295,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-23_23-21-02-704802_[mol_trans,bindingdb]/experiment=bindingdb;local=gpu3090;preset=mol_trans;tags=[mol_trans,bindingdb]/checkpoints/epoch_064.ckpt",COMPLETED,mol_trans,bindingdb,,"[mol_trans,bindingdb]",,gpu3090, -0.202189102768898,0.3614169955253601,0.8401678204536438,0.330232560634613,0.5680000185966492,0.2327868789434433,0.2327868789434433,0.9859081506729126,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-23_23-27-18-387556_[m_graph_dta,benchmark]/experiment=davis;local=gpu3090;preset=m_graph_dta;tags=[m_graph_dta,benchmark]/checkpoints/epoch_094.ckpt",COMPLETED,m_graph_dta,davis,,"[m_graph_dta,benchmark]",,gpu3090, -0.1921116858720779,0.6082723736763,0.8893994092941284,0.4803767800331116,0.7285714149475098,0.3583138287067413,0.3583138287067413,0.9867205023765564,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-23_23-27-18-387556_[m_graph_dta,benchmark]/experiment=kiba;local=gpu3090;preset=m_graph_dta;tags=[m_graph_dta,benchmark]/checkpoints/epoch_246.ckpt",COMPLETED,m_graph_dta,kiba,,"[m_graph_dta,benchmark]",,gpu3090, -0.6574236750602722,0.5444095730781555,0.6328268051147461,0.483252614736557,0.5203881859779358,0.4510641098022461,0.4510641098022461,0.7001991868019104,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-11-06_14-16-25-154180_[benchmark,tcpi2,bindingdb]/data.batch_size=32;experiment=bindingdb;preset=transformer_cpi_2;tags=[benchmark,tcpi2,bindingdb]/checkpoints/epoch_017.ckpt",COMPLETED,transformer_cpi_2,bindingdb,,"[benchmark,tcpi2,bindingdb]",,,32.0 diff --git a/configs/sweep/experiment_summary_bindingdb.csv b/configs/sweep/experiment_summary_bindingdb.csv deleted file mode 100644 index b2a3ee1ab0192c82662c03cf59ed07bda14b1862..0000000000000000000000000000000000000000 --- a/configs/sweep/experiment_summary_bindingdb.csv +++ /dev/null @@ -1,7 +0,0 @@ -test/loss,test/auprc,test/auroc,test/f1_score,test/precision,test/recall,test/sensitivity,test/specificity,ckpt_path,job_status,preset,experiment,sweep,tags,model.optimizer.lr,local,data.batch_size -0.5104637742042542,0.7619075775146484,0.8163812756538391,0.6808292269706726,0.6954700350761414,0.6667922139167786,0.6667922139167786,0.789406955242157,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-16_14-53-13-645337_[benchmark,ddta,gdta]/experiment=bindingdb;preset=deep_dta;sweep=slurm_test;tags=[benchmark,ddta,gdta]/checkpoints/epoch_003.ckpt",COMPLETED,deep_dta,bindingdb,slurm_test,"[benchmark,ddta,gdta]",,, -0.4569543302059173,0.828099250793457,0.8623664379119873,0.7299983501434326,0.7756586670875549,0.6894148588180542,0.6894148588180542,0.8561919927597046,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-16_14-53-13-645337_[benchmark,ddta,gdta]/experiment=bindingdb;preset=graph_dta;sweep=slurm_test;tags=[benchmark,ddta,gdta]/checkpoints/epoch_034.ckpt",COMPLETED,graph_dta,bindingdb,slurm_test,"[benchmark,ddta,gdta]",,, -0.3445079922676086,0.915877878665924,0.934882640838623,0.8371888399124146,0.8385065793991089,0.8358752131462097,0.8358752131462097,0.8838841319084167,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-16_18-08-40-028557_[sweep,benchmark]/experiment=bindingdb;preset=hyper_attention_dti;sweep=slurm_test/checkpoints/epoch_017.ckpt",COMPLETED,hyper_attention_dti,bindingdb,slurm_test,,,, 
-0.3221746683120727,0.9174473285675048,0.9374850988388062,0.8440826535224915,0.8421315550804138,0.846042811870575,0.846042811870575,0.8856043219566345,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-20_21-25-54-050914_[deep_conv_dti,benchmark]/experiment=bindingdb;local=gpu3090;preset=deep_conv_dti;tags=[deep_conv_dti,benchmark]/checkpoints/epoch_008.ckpt",COMPLETED,deep_conv_dti,bindingdb,,"[deep_conv_dti,benchmark]",,gpu3090, -0.3808988630771637,0.9017271995544434,0.92254638671875,0.8203105330467224,0.818312406539917,0.822318434715271,0.822318434715271,0.8683114647865295,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-23_23-21-02-704802_[mol_trans,bindingdb]/experiment=bindingdb;local=gpu3090;preset=mol_trans;tags=[mol_trans,bindingdb]/checkpoints/epoch_064.ckpt",COMPLETED,mol_trans,bindingdb,,"[mol_trans,bindingdb]",,gpu3090, -0.6574236750602722,0.5444095730781555,0.6328268051147461,0.483252614736557,0.5203881859779358,0.4510641098022461,0.4510641098022461,0.7001991868019104,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-11-06_14-16-25-154180_[benchmark,tcpi2,bindingdb]/data.batch_size=32;experiment=bindingdb;preset=transformer_cpi_2;tags=[benchmark,tcpi2,bindingdb]/checkpoints/epoch_017.ckpt",COMPLETED,transformer_cpi_2,bindingdb,,"[benchmark,tcpi2,bindingdb]",,,32.0 diff --git a/configs/sweep/experiment_summary_davis.csv b/configs/sweep/experiment_summary_davis.csv deleted file mode 100644 index 1cc9af384ea7c91cdc510d734862bbe56edc3f43..0000000000000000000000000000000000000000 --- a/configs/sweep/experiment_summary_davis.csv +++ /dev/null @@ -1,8 +0,0 @@ -test/loss,test/auprc,test/auroc,test/f1_score,test/precision,test/recall,test/sensitivity,test/specificity,ckpt_path,job_status,preset,experiment,sweep,tags,model.optimizer.lr,local,data.batch_size -0.193635880947113,0.4078962802886963,0.8581838607788086,0.3916666805744171,0.5371428728103638,0.3081967234611511,0.3081967234611511,0.9788622260093688,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-16_14-53-13-645337_[benchmark,ddta,gdta]/experiment=davis;preset=deep_dta;sweep=slurm_test;tags=[benchmark,ddta,gdta]/checkpoints/epoch_008.ckpt",COMPLETED,deep_dta,davis,slurm_test,"[benchmark,ddta,gdta]",,, -0.2221491634845733,0.3560383319854736,0.78690105676651,0.2659846544265747,0.604651153087616,0.1704917997121811,0.1704917997121811,0.9911273717880248,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-16_14-53-13-645337_[benchmark,ddta,gdta]/experiment=davis;preset=graph_dta;sweep=slurm_test;tags=[benchmark,ddta,gdta]/checkpoints/epoch_054.ckpt",COMPLETED,graph_dta,davis,slurm_test,"[benchmark,ddta,gdta]",,, -0.1881016194820404,0.5213174819946289,0.8876519203186035,0.5207100510597229,0.6534653306007385,0.4327868819236755,0.4327868819236755,0.9817327857017516,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-16_18-08-40-028557_[sweep,benchmark]/experiment=davis;preset=hyper_attention_dti;sweep=slurm_test/checkpoints/epoch_017.ckpt",COMPLETED,hyper_attention_dti,davis,slurm_test,,,, -0.3915603458881378,0.3016493022441864,0.7910888195037842,0.085626907646656,0.6363636255264282,0.0459016375243663,0.0459016375243663,0.9979122877120972,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-20_18-50-02-680654_[mol_trans,benchmark]/experiment=davis;preset=mol_trans;sweep=slurm_test/checkpoints/epoch_000.ckpt",COMPLETED,mol_trans,davis,slurm_test,,,, 
-0.2160931378602981,0.3230183720588684,0.796424388885498,0.3271028101444244,0.5691056847572327,0.2295081913471222,0.2295081913471222,0.9861690998077391,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-20_20-20-11-193061_[tcpi2,kiba,davis]/experiment=davis;model.optimizer.lr=1e-06;preset=transformer_cpi_2;sweep=slurm_test;tags=[tcpi2,kiba,davis]/checkpoints/epoch_023.ckpt",COMPLETED,transformer_cpi_2,davis,slurm_test,"[tcpi2,kiba,davis]",1e-06,, -0.1689525544643402,0.5410239696502686,0.9028607606887816,0.4678111672401428,0.6770186424255371,0.3573770523071289,0.3573770523071289,0.986430048942566,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-20_21-25-54-050914_[deep_conv_dti,benchmark]/experiment=davis;local=gpu3090;preset=deep_conv_dti;tags=[deep_conv_dti,benchmark]/checkpoints/epoch_012.ckpt",COMPLETED,deep_conv_dti,davis,,"[deep_conv_dti,benchmark]",,gpu3090, -0.202189102768898,0.3614169955253601,0.8401678204536438,0.330232560634613,0.5680000185966492,0.2327868789434433,0.2327868789434433,0.9859081506729126,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-23_23-27-18-387556_[m_graph_dta,benchmark]/experiment=davis;local=gpu3090;preset=m_graph_dta;tags=[m_graph_dta,benchmark]/checkpoints/epoch_094.ckpt",COMPLETED,m_graph_dta,davis,,"[m_graph_dta,benchmark]",,gpu3090, diff --git a/configs/sweep/experiment_summary_kiba.csv b/configs/sweep/experiment_summary_kiba.csv deleted file mode 100644 index fea61408c0c735bb75f05494db511650a4beff79..0000000000000000000000000000000000000000 --- a/configs/sweep/experiment_summary_kiba.csv +++ /dev/null @@ -1,8 +0,0 @@ -test/loss,test/auprc,test/auroc,test/f1_score,test/precision,test/recall,test/sensitivity,test/specificity,ckpt_path,job_status,preset,experiment,sweep,tags,model.optimizer.lr,local,data.batch_size -0.1525115370750427,0.735183835029602,0.9363504648208618,0.661261260509491,0.7816826701164246,0.5729898810386658,0.5729898810386658,0.984080135822296,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-16_14-53-13-645337_[benchmark,ddta,gdta]/experiment=kiba;preset=deep_dta;sweep=slurm_test;tags=[benchmark,ddta,gdta]/checkpoints/epoch_041.ckpt",COMPLETED,deep_dta,kiba,slurm_test,"[benchmark,ddta,gdta]",,, -0.2281630784273147,0.4483628869056701,0.8383051156997681,0.2730720639228821,0.7176079750061035,0.1686182618141174,0.1686182618141174,0.9933990836143494,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-16_14-53-13-645337_[benchmark,ddta,gdta]/experiment=kiba;preset=graph_dta;sweep=slurm_test;tags=[benchmark,ddta,gdta]/checkpoints/epoch_065.ckpt",COMPLETED,graph_dta,kiba,slurm_test,"[benchmark,ddta,gdta]",,, -0.144176036119461,0.7704265117645264,0.9501243829727172,0.6897732019424438,0.7632575631141663,0.6291959285736084,0.6291959285736084,0.980585515499115,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-16_18-08-40-028557_[sweep,benchmark]/experiment=kiba;preset=hyper_attention_dti;sweep=slurm_test/checkpoints/epoch_014.ckpt",COMPLETED,hyper_attention_dti,kiba,slurm_test,,,, -0.1662539541721344,0.7397991418838501,0.9369943141937256,0.6784178614616394,0.7550238966941833,0.6159250736236572,0.6159250736236572,0.9801195859909058,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-20_18-50-02-680654_[mol_trans,benchmark]/experiment=kiba;preset=mol_trans;sweep=slurm_test/checkpoints/epoch_008.ckpt",COMPLETED,mol_trans,kiba,slurm_test,,,, 
-0.2221459150314331,0.5159928798675537,0.8539965152740479,0.4716618657112121,0.612983763217926,0.3832943141460418,0.3832943141460418,0.9759260416030884,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-20_20-20-11-193061_[tcpi2,kiba,davis]/experiment=kiba;model.optimizer.lr=1e-06;preset=transformer_cpi_2;sweep=slurm_test;tags=[tcpi2,kiba,davis]/checkpoints/epoch_077.ckpt",COMPLETED,transformer_cpi_2,kiba,slurm_test,"[tcpi2,kiba,davis]",1e-06,, -0.1273866593837738,0.8186066746711731,0.95548677444458,0.7409326434135437,0.8289855122566223,0.6697892546653748,0.6697892546653748,0.9862545728683472,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-20_21-25-54-050914_[deep_conv_dti,benchmark]/experiment=kiba;local=gpu3090;preset=deep_conv_dti;tags=[deep_conv_dti,benchmark]/checkpoints/epoch_026.ckpt",COMPLETED,deep_conv_dti,kiba,,"[deep_conv_dti,benchmark]",,gpu3090, -0.1921116858720779,0.6082723736763,0.8893994092941284,0.4803767800331116,0.7285714149475098,0.3583138287067413,0.3583138287067413,0.9867205023765564,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-23_23-27-18-387556_[m_graph_dta,benchmark]/experiment=kiba;local=gpu3090;preset=m_graph_dta;tags=[m_graph_dta,benchmark]/checkpoints/epoch_246.ckpt",COMPLETED,m_graph_dta,kiba,,"[m_graph_dta,benchmark]",,gpu3090, diff --git a/configs/sweep/experiment_summary_test.csv b/configs/sweep/experiment_summary_test.csv deleted file mode 100644 index a1ce0b60c2e5c9e1cd678cc7a7a8867a2472d15f..0000000000000000000000000000000000000000 --- a/configs/sweep/experiment_summary_test.csv +++ /dev/null @@ -1,4 +0,0 @@ -test/loss,test/auprc,test/auroc,test/f1_score,test/precision,test/recall,test/sensitivity,test/specificity,ckpt_path,job_status,preset,experiment,sweep,tags,model.optimizer.lr,local,data.batch_size -0.193635880947113,0.4078962802886963,0.8581838607788086,0.3916666805744171,0.5371428728103638,0.3081967234611511,0.3081967234611511,0.9788622260093688,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-16_14-53-13-645337_[benchmark,ddta,gdta]/experiment=davis;preset=deep_dta;sweep=slurm_test;tags=[benchmark,ddta,gdta]/checkpoints/epoch_008.ckpt",COMPLETED,deep_dta,davis,slurm_test,"[benchmark,ddta,gdta]",,, -0.2221491634845733,0.3560383319854736,0.78690105676651,0.2659846544265747,0.604651153087616,0.1704917997121811,0.1704917997121811,0.9911273717880248,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-16_14-53-13-645337_[benchmark,ddta,gdta]/experiment=davis;preset=graph_dta;sweep=slurm_test;tags=[benchmark,ddta,gdta]/checkpoints/epoch_054.ckpt",COMPLETED,graph_dta,davis,slurm_test,"[benchmark,ddta,gdta]",,, -0.1689525544643402,0.5410239696502686,0.9028607606887816,0.4678111672401428,0.6770186424255371,0.3573770523071289,0.3573770523071289,0.986430048942566,"/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-20_21-25-54-050914_[deep_conv_dti,benchmark]/experiment=davis;local=gpu3090;preset=deep_conv_dti;tags=[deep_conv_dti,benchmark]/checkpoints/epoch_012.ckpt",COMPLETED,deep_conv_dti,davis,,"[deep_conv_dti,benchmark]",,gpu3090, diff --git a/configs/sweep/midterm_benchmark.yaml b/configs/sweep/midterm_benchmark.yaml deleted file mode 100644 index 17c6fbef87d5df1a6b3a5362a3332f710c30031b..0000000000000000000000000000000000000000 --- a/configs/sweep/midterm_benchmark.yaml +++ /dev/null @@ -1,6 +0,0 @@ -# @package _global_ -tags: ['midterm', 'benchmark'] -hydra: - sweeper: - params: - experiment: kinase,membrane_receptors,non_kinase_enzymes 
#,ion_channels,nuclear_receptors,other_protein_targets \ No newline at end of file diff --git a/configs/sweep/multi_test.yaml b/configs/sweep/multi_test.yaml deleted file mode 100644 index 8fe120a7805b77823572d7ed83ab168ec685c939..0000000000000000000000000000000000000000 --- a/configs/sweep/multi_test.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# @package _global_ -hydra: - sweeper: - params: -# ckpt_path: "'C:/Users/libok/Documents/GitHub/DeepScreen/logs/train/runs/2023-11-07_23-21-55-442205_[debug]/checkpoints/epoch_000.ckpt','C:/Users/libok/Documents/GitHub/DeepScreen/logs/train/runs/2023-11-07_19-46-08-740035_[debug]/checkpoints/epoch_000.ckpt'" - data.data_file: dti_benchmark/random_split_update/bindingdb_reserved_test.csv -# ckpt_path: "'/gpfs/work/pha/daiyunhuang/WavyWaffle/logs/train/multiruns/2023-10-16_14-53-13-645337_[benchmark,ddta,gdta]/metrics_summary.csv'" -# ckpt_path: "/gpfs/work/pha/daiyunhuang/WavyWaffle/configs/sweep/experiment_summary_bindingdb.csv" diff --git a/configs/sweep/slurm_test.yaml b/configs/sweep/slurm_test.yaml deleted file mode 100644 index 530ab519eed0ffe86445f2960c667e4484097459..0000000000000000000000000000000000000000 --- a/configs/sweep/slurm_test.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# @package _global_ -tags: ['sweep', 'benchmark'] -hydra: - sweeper: - params: - preset: graph_dta,deep_dta,drug_vqa, mol_trans,hyper_attention_dti,transformer_cpi_2 - experiment: kiba,davis,bindingdb - diff --git a/configs/task/binary.yaml b/configs/task/binary.yaml deleted file mode 100644 index 31eafe56289ebcb591ddd4254a943015c0d3e429..0000000000000000000000000000000000000000 --- a/configs/task/binary.yaml +++ /dev/null @@ -1,12 +0,0 @@ -task: binary -num_classes: null - -out: - _target_: torch.nn.LazyLinear - out_features: 1 - -loss: - _target_: torch.nn.BCEWithLogitsLoss - -activation: - _target_: torch.nn.Sigmoid \ No newline at end of file diff --git a/configs/task/multiclass.yaml b/configs/task/multiclass.yaml deleted file mode 100644 index 686e9f9b96c98839ec4bdad32351d29aca026618..0000000000000000000000000000000000000000 --- a/configs/task/multiclass.yaml +++ /dev/null @@ -1,12 +0,0 @@ -task: multiclass -num_classes: 3 - -out: - _target_: torch.nn.LazyLinear - out_features: ${num_classes} - -loss: - _target_: torch.nn.CrossEntropyLoss - -activation: - _target_: torch.nn.Softmax \ No newline at end of file diff --git a/configs/task/regression.yaml b/configs/task/regression.yaml deleted file mode 100644 index 0de6eeb0179fcf63608df8a62719a5f6792baf1d..0000000000000000000000000000000000000000 --- a/configs/task/regression.yaml +++ /dev/null @@ -1,12 +0,0 @@ -task: regression -num_classes: null - -out: - _target_: torch.nn.LazyLinear - out_features: 1 - -loss: - _target_: torch.nn.MSELoss - -activation: - _target_: torch.nn.Identity \ No newline at end of file diff --git a/configs/test.yaml b/configs/test.yaml deleted file mode 100644 index cc96bf8ed23b5f342b1bd2e49d39a31e401a5db4..0000000000000000000000000000000000000000 --- a/configs/test.yaml +++ /dev/null @@ -1,33 +0,0 @@ -# @package _global_ -defaults: - - model: dti_model # fixed for web server version - - task: null - - data: dti_data # fixed for web server version - - callbacks: - - model_summary - - rich_progress_bar - - logger: csv - - trainer: default - - paths: default - - extras: default - - hydra: default -# - hydra/callbacks: -# - csv_metrics_summary - - _self_ - - preset: null - - experiment: null - - sweep: null - - debug: null - - optional local: default - - -job_name: "test" - -tags: ??? 
- -# passing checkpoint path is necessary for evaluation -ckpt_path: ??? - -model: - optimizer: null - scheduler: null \ No newline at end of file diff --git a/configs/train.yaml b/configs/train.yaml deleted file mode 100644 index 8ccc566f93ae5ce7d80ebe65e215b6df3877c533..0000000000000000000000000000000000000000 --- a/configs/train.yaml +++ /dev/null @@ -1,53 +0,0 @@ -# @package _global_ - -# specify here default configuration -# order of defaults determines the order in which configs override each other -defaults: - - model: dti_model # fixed for web server version - - task: null - - data: dti_data # fixed for web server version - - callbacks: default - - logger: multiple_loggers # set logger here or use command line (e.g. `python train.py logger=tensorboard`) - - trainer: default - - paths: default - - extras: default - - hydra: default - - - _self_ - - - preset: null - # Experiment configs allow for version control of specific hyperparameters - # e.g. the best hyperparameters for given model and data - - experiment: null - # Sweeping config, for batch experiments, hyperparameter optimization, etc. - - sweep: null - # Debugging config (enable through command line, e.g. `python train.py debug=default) - - debug: null - - - optional local: default - -# job name, determines output directory path -job_name: "train" - -# tags to help you identify your experiments -# you can overwrite this in experiment configs -# overwrite from command line with `python train.py tags="[first_tag, second_tag]"` -tags: ??? - -# evaluate on test set, using best model weights achieved during training -# lightning chooses best weights based on the metric specified in checkpoint callback -# test: True - -# compile model for faster training with PyTorch 2.0+ -compile: False - -# simply provide checkpoint path to resume training -ckpt_path: null - -# seed for random number generators in torch, numpy and python.random -seed: 12345 - -trainer: - check_val_every_n_epoch: 1 - limit_val_batches: ${eval:'1.0 if ${data.train_val_test_split}[1] else 0'} - num_sanity_val_steps: ${eval:'2 if ${data.train_val_test_split}[1] else 0'} diff --git a/configs/trainer/cpu.yaml b/configs/trainer/cpu.yaml deleted file mode 100644 index b7d6767e60c956567555980654f15e7bb673a41f..0000000000000000000000000000000000000000 --- a/configs/trainer/cpu.yaml +++ /dev/null @@ -1,5 +0,0 @@ -defaults: - - default - -accelerator: cpu -devices: 1 diff --git a/configs/trainer/ddp.yaml b/configs/trainer/ddp.yaml deleted file mode 100644 index 3c9778447171ba5f5282e1087aa61be2b4763622..0000000000000000000000000000000000000000 --- a/configs/trainer/ddp.yaml +++ /dev/null @@ -1,10 +0,0 @@ -defaults: - - default - -strategy: ddp - -accelerator: gpu -devices: 4 -num_nodes: 1 -sync_batchnorm: True -precision: 16-mixed diff --git a/configs/trainer/ddp_sim.yaml b/configs/trainer/ddp_sim.yaml deleted file mode 100644 index 8404419e5c295654967d0dfb73a7366e75be2f1f..0000000000000000000000000000000000000000 --- a/configs/trainer/ddp_sim.yaml +++ /dev/null @@ -1,7 +0,0 @@ -defaults: - - default - -# simulate DDP on CPU, useful for debugging -accelerator: cpu -devices: 2 -strategy: ddp_spawn diff --git a/configs/trainer/default.yaml b/configs/trainer/default.yaml deleted file mode 100644 index 5a00b4536dec28a739772e2cc798ab46b2dc2824..0000000000000000000000000000000000000000 --- a/configs/trainer/default.yaml +++ /dev/null @@ -1,18 +0,0 @@ -_target_: lightning.Trainer - -default_root_dir: ${paths.output_dir} - -min_epochs: 1 -max_epochs: 50 - -precision: 32 - 
-gradient_clip_val: 0.5 -gradient_clip_algorithm: norm - -# deterministic algorithms might make training slower but offers more reproducibility than only setting seeds -# True: use deterministic always, throwing an error on an operation that doesn't support deterministic -# warn: use deterministic when possible, throwing warnings on operations that don’t support deterministic -deterministic: warn - -inference_mode: True \ No newline at end of file diff --git a/configs/trainer/gpu.yaml b/configs/trainer/gpu.yaml deleted file mode 100644 index f9dc7f73609b2b2fe595d84288a3b556d8d3356c..0000000000000000000000000000000000000000 --- a/configs/trainer/gpu.yaml +++ /dev/null @@ -1,6 +0,0 @@ -defaults: - - default - -accelerator: gpu -devices: 1 -precision: 16-mixed \ No newline at end of file diff --git a/configs/trainer/mps.yaml b/configs/trainer/mps.yaml deleted file mode 100644 index 1ecf6d5cc3a34ca127c5510f4a18e989561e38e4..0000000000000000000000000000000000000000 --- a/configs/trainer/mps.yaml +++ /dev/null @@ -1,5 +0,0 @@ -defaults: - - default - -accelerator: mps -devices: 1 diff --git a/configs/webserver_inference.yaml b/configs/webserver_inference.yaml deleted file mode 100644 index 194b8b81dff65bdbfe260c106e09bd7d5eaa0e42..0000000000000000000000000000000000000000 --- a/configs/webserver_inference.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# @package _global_ -defaults: - - model: dti_model # fixed for web server version - - task: null - - data: dti_data # fixed for web server version - - callbacks: null - - trainer: default - - paths: default - - extras: null - - hydra: null - - _self_ - - preset: null - - experiment: null - - sweep: null - - debug: null - - optional local: default - -job_name: "webserver_inference" - -tags: null - -# passing checkpoint path is necessary for prediction -ckpt_path: ??? - -paths: - output_dir: null - work_dir: null \ No newline at end of file
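For the task configs above (e.g. configs/task/binary.yaml), a minimal sketch of how their _target_ entries become PyTorch objects through hydra.utils.instantiate, assuming torch and hydra-core are installed; the tensor shapes are arbitrary examples:

import torch
from hydra.utils import instantiate
from omegaconf import OmegaConf

# Mirrors the deleted configs/task/binary.yaml shown in the diff above.
task_cfg = OmegaConf.create("""
task: binary
num_classes: null
out:
  _target_: torch.nn.LazyLinear
  out_features: 1
loss:
  _target_: torch.nn.BCEWithLogitsLoss
activation:
  _target_: torch.nn.Sigmoid
""")

out_layer = instantiate(task_cfg.out)          # torch.nn.LazyLinear(out_features=1)
loss_fn = instantiate(task_cfg.loss)           # torch.nn.BCEWithLogitsLoss()
activation = instantiate(task_cfg.activation)  # torch.nn.Sigmoid()

features = torch.randn(4, 16)   # arbitrary batch of encoder outputs
logits = out_layer(features)    # LazyLinear infers in_features=16 on first call
labels = torch.ones(4, 1)       # dummy binary labels
print(loss_fn(logits, labels), activation(logits).squeeze(-1))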