diff --git a/.buildinfo b/.buildinfo index 51945a1cf9..acbad833aa 100644 --- a/.buildinfo +++ b/.buildinfo @@ -1,4 +1,4 @@ # Sphinx build info version 1 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: 3e16fc85e4d09f60ba66e881fe15276a +config: 21ab98e528573f73f1b5d8a9a8dc8e52 tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/_downloads/5fdddbed2260616231dbf7b0d94bb665/train.txt b/_downloads/5fdddbed2260616231dbf7b0d94bb665/train.txt index 500c824edc..b5f5049398 100644 --- a/_downloads/5fdddbed2260616231dbf7b0d94bb665/train.txt +++ b/_downloads/5fdddbed2260616231dbf7b0d94bb665/train.txt @@ -1,16 +1,16 @@ -2024-07-14 21:54:13 (INFO): Running in non-distributed local mode -2024-07-14 21:54:13 (INFO): Project root: /home/runner/work/fairchem/fairchem/src/fairchem -2024-07-14 21:54:14 (INFO): amp: true +2024-07-19 20:36:44 (INFO): Running in non-distributed local mode +2024-07-19 20:36:44 (INFO): Project root: /home/runner/work/fairchem/fairchem/src/fairchem +2024-07-19 20:36:45 (INFO): amp: true cmd: - checkpoint_dir: fine-tuning/checkpoints/2024-07-14-21-54-08-ft-oxides - commit: c2f8928 + checkpoint_dir: fine-tuning/checkpoints/2024-07-19-20-37-20-ft-oxides + commit: f9ecf73 identifier: ft-oxides - logs_dir: fine-tuning/logs/tensorboard/2024-07-14-21-54-08-ft-oxides + logs_dir: fine-tuning/logs/tensorboard/2024-07-19-20-37-20-ft-oxides print_every: 10 - results_dir: fine-tuning/results/2024-07-14-21-54-08-ft-oxides + results_dir: fine-tuning/results/2024-07-19-20-37-20-ft-oxides seed: 0 - timestamp_id: 2024-07-14-21-54-08-ft-oxides - version: 0.1.dev1+gc2f8928 + timestamp_id: 2024-07-19-20-37-20-ft-oxides + version: 0.1.dev1+gf9ecf73 dataset: a2g_args: r_energy: true @@ -142,83 +142,83 @@ val_dataset: format: ase_db src: val.db -2024-07-14 21:54:14 (INFO): Loading dataset: ase_db -2024-07-14 21:54:14 (INFO): rank: 0: Sampler created... -2024-07-14 21:54:14 (INFO): Batch balancing is disabled for single GPU training. -2024-07-14 21:54:14 (INFO): rank: 0: Sampler created... -2024-07-14 21:54:14 (INFO): Batch balancing is disabled for single GPU training. -2024-07-14 21:54:14 (INFO): rank: 0: Sampler created... -2024-07-14 21:54:14 (INFO): Batch balancing is disabled for single GPU training. -2024-07-14 21:54:14 (INFO): Loading model: gemnet_oc -2024-07-14 21:54:14 (WARNING): Unrecognized arguments: ['symmetric_edge_symmetrization'] -2024-07-14 21:54:17 (INFO): Loaded GemNetOC with 38864438 parameters. -2024-07-14 21:54:17 (WARNING): log_summary for Tensorboard not supported -2024-07-14 21:54:17 (WARNING): Using `weight_decay` from `optim` instead of `optim.optimizer_params`.Please update your config to use `optim.optimizer_params.weight_decay`.`optim.weight_decay` will soon be deprecated. -2024-07-14 21:54:17 (INFO): Loading checkpoint from: /tmp/ocp_checkpoints/gnoc_oc22_oc20_all_s2ef.pt -2024-07-14 21:54:17 (INFO): Overwriting scaling factors with those loaded from checkpoint. If you're generating predictions with a pretrained checkpoint, this is the correct behavior. To disable this, delete `scale_dict` from the checkpoint. +2024-07-19 20:36:45 (INFO): Loading dataset: ase_db +2024-07-19 20:36:45 (INFO): rank: 0: Sampler created... +2024-07-19 20:36:45 (INFO): Batch balancing is disabled for single GPU training. +2024-07-19 20:36:45 (INFO): rank: 0: Sampler created... +2024-07-19 20:36:45 (INFO): Batch balancing is disabled for single GPU training. +2024-07-19 20:36:45 (INFO): rank: 0: Sampler created... 
+2024-07-19 20:36:45 (INFO): Batch balancing is disabled for single GPU training. +2024-07-19 20:36:45 (INFO): Loading model: gemnet_oc +2024-07-19 20:36:45 (WARNING): Unrecognized arguments: ['symmetric_edge_symmetrization'] +2024-07-19 20:36:48 (INFO): Loaded GemNetOC with 38864438 parameters. +2024-07-19 20:36:48 (WARNING): log_summary for Tensorboard not supported +2024-07-19 20:36:48 (WARNING): Using `weight_decay` from `optim` instead of `optim.optimizer_params`.Please update your config to use `optim.optimizer_params.weight_decay`.`optim.weight_decay` will soon be deprecated. +2024-07-19 20:36:48 (INFO): Loading checkpoint from: /tmp/fairchem_checkpoints/gnoc_oc22_oc20_all_s2ef.pt +2024-07-19 20:36:48 (INFO): Overwriting scaling factors with those loaded from checkpoint. If you're generating predictions with a pretrained checkpoint, this is the correct behavior. To disable this, delete `scale_dict` from the checkpoint. /opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() storage = elem.storage()._new_shared(numel) /opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() storage = elem.storage()._new_shared(numel) -2024-07-14 21:54:40 (INFO): energy_mae: 6.77e+00, forcesx_mae: 4.11e-02, forcesy_mae: 3.63e-02, forcesz_mae: 5.26e-02, forces_mae: 4.33e-02, forces_cosine_similarity: 8.24e-02, forces_magnitude_error: 7.42e-02, energy_forces_within_threshold: 0.00e+00, loss: 6.86e+00, lr: 5.00e-04, epoch: 1.69e-01, step: 1.00e+01 -2024-07-14 21:54:41 (INFO): Evaluating on val. +2024-07-19 20:37:11 (INFO): energy_mae: 6.77e+00, forcesx_mae: 4.11e-02, forcesy_mae: 3.63e-02, forcesz_mae: 5.26e-02, forces_mae: 4.33e-02, forces_cosine_similarity: 8.24e-02, forces_magnitude_error: 7.42e-02, energy_forces_within_threshold: 0.00e+00, loss: 6.86e+00, lr: 5.00e-04, epoch: 1.69e-01, step: 1.00e+01 +2024-07-19 20:37:12 (INFO): Evaluating on val. device 0: 0%| | 0/2 [00:00, ?it/s]/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() storage = elem.storage()._new_shared(numel) /opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. 
To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() storage = elem.storage()._new_shared(numel) - device 0: 50%|█████ | 1/2 [00:04<00:04, 4.63s/it] device 0: 100%|██████████| 2/2 [00:07<00:00, 3.56s/it] device 0: 100%|██████████| 2/2 [00:07<00:00, 3.78s/it] -2024-07-14 21:54:48 (INFO): energy_mae: 8.7333, forcesx_mae: 0.0231, forcesy_mae: 0.0188, forcesz_mae: 0.0198, forces_mae: 0.0206, forces_cosine_similarity: -0.0317, forces_magnitude_error: 0.0290, energy_forces_within_threshold: 0.0000, loss: 8.6590, epoch: 0.1695 -2024-07-14 21:54:49 (INFO): Predicting on test. + device 0: 50%|█████ | 1/2 [00:04<00:04, 4.82s/it] device 0: 100%|██████████| 2/2 [00:07<00:00, 3.69s/it] device 0: 100%|██████████| 2/2 [00:07<00:00, 3.91s/it] +2024-07-19 20:37:20 (INFO): energy_mae: 8.7333, forcesx_mae: 0.0231, forcesy_mae: 0.0188, forcesz_mae: 0.0198, forces_mae: 0.0206, forces_cosine_similarity: -0.0317, forces_magnitude_error: 0.0290, energy_forces_within_threshold: 0.0000, loss: 8.6590, epoch: 0.1695 +2024-07-19 20:37:20 (INFO): Predicting on test. device 0: 0%| | 0/2 [00:00, ?it/s]/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() storage = elem.storage()._new_shared(numel) /opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() storage = elem.storage()._new_shared(numel) - device 0: 50%|█████ | 1/2 [00:02<00:02, 2.99s/it] device 0: 100%|██████████| 2/2 [00:05<00:00, 2.83s/it] device 0: 100%|██████████| 2/2 [00:05<00:00, 2.92s/it] -2024-07-14 21:54:54 (INFO): Writing results to fine-tuning/results/2024-07-14-21-54-08-ft-oxides/ocp_predictions.npz -2024-07-14 21:55:20 (INFO): energy_mae: 1.07e+01, forcesx_mae: 2.63e-02, forcesy_mae: 1.76e-02, forcesz_mae: 1.90e-02, forces_mae: 2.10e-02, forces_cosine_similarity: -3.68e-02, forces_magnitude_error: 2.26e-02, energy_forces_within_threshold: 0.00e+00, loss: 1.07e+01, lr: 5.00e-04, epoch: 3.39e-01, step: 2.00e+01 -2024-07-14 21:55:22 (INFO): Evaluating on val. + device 0: 50%|█████ | 1/2 [00:03<00:03, 3.06s/it] device 0: 100%|██████████| 2/2 [00:05<00:00, 2.87s/it] device 0: 100%|██████████| 2/2 [00:05<00:00, 2.98s/it] +2024-07-19 20:37:26 (INFO): Writing results to fine-tuning/results/2024-07-19-20-37-20-ft-oxides/ocp_predictions.npz +2024-07-19 20:37:52 (INFO): energy_mae: 1.07e+01, forcesx_mae: 2.63e-02, forcesy_mae: 1.76e-02, forcesz_mae: 1.90e-02, forces_mae: 2.10e-02, forces_cosine_similarity: -3.68e-02, forces_magnitude_error: 2.26e-02, energy_forces_within_threshold: 0.00e+00, loss: 1.07e+01, lr: 5.00e-04, epoch: 3.39e-01, step: 2.00e+01 +2024-07-19 20:37:54 (INFO): Evaluating on val. device 0: 0%| | 0/2 [00:00, ?it/s]/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. 
This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() storage = elem.storage()._new_shared(numel) /opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() storage = elem.storage()._new_shared(numel) - device 0: 50%|█████ | 1/2 [00:04<00:04, 4.53s/it] device 0: 100%|██████████| 2/2 [00:07<00:00, 3.61s/it] device 0: 100%|██████████| 2/2 [00:07<00:00, 3.83s/it] -2024-07-14 21:55:29 (INFO): energy_mae: 2.6828, forcesx_mae: 0.0235, forcesy_mae: 0.0180, forcesz_mae: 0.0129, forces_mae: 0.0181, forces_cosine_similarity: 0.0673, forces_magnitude_error: 0.0215, energy_forces_within_threshold: 0.0000, loss: 2.7108, epoch: 0.3390 -2024-07-14 21:55:30 (INFO): Predicting on test. + device 0: 50%|█████ | 1/2 [00:04<00:04, 4.76s/it] device 0: 100%|██████████| 2/2 [00:07<00:00, 3.78s/it] device 0: 100%|██████████| 2/2 [00:08<00:00, 4.01s/it] +2024-07-19 20:38:02 (INFO): energy_mae: 2.6851, forcesx_mae: 0.0235, forcesy_mae: 0.0180, forcesz_mae: 0.0129, forces_mae: 0.0181, forces_cosine_similarity: 0.0675, forces_magnitude_error: 0.0215, energy_forces_within_threshold: 0.0000, loss: 2.7130, epoch: 0.3390 +2024-07-19 20:38:02 (INFO): Predicting on test. device 0: 0%| | 0/2 [00:00, ?it/s]/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() storage = elem.storage()._new_shared(numel) /opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() storage = elem.storage()._new_shared(numel) - device 0: 50%|█████ | 1/2 [00:02<00:02, 2.94s/it] device 0: 100%|██████████| 2/2 [00:05<00:00, 2.77s/it] device 0: 100%|██████████| 2/2 [00:05<00:00, 2.89s/it] -2024-07-14 21:55:36 (INFO): Writing results to fine-tuning/results/2024-07-14-21-54-08-ft-oxides/ocp_predictions.npz -2024-07-14 21:55:55 (INFO): energy_mae: 4.87e+00, forcesx_mae: 1.82e-02, forcesy_mae: 1.51e-02, forcesz_mae: 2.06e-02, forces_mae: 1.80e-02, forces_cosine_similarity: -1.04e-02, forces_magnitude_error: 2.32e-02, energy_forces_within_threshold: 0.00e+00, loss: 4.90e+00, lr: 5.00e-04, epoch: 5.08e-01, step: 3.00e+01 -2024-07-14 21:55:57 (INFO): Evaluating on val. 
+ device 0: 50%|█████ | 1/2 [00:03<00:03, 3.21s/it] device 0: 100%|██████████| 2/2 [00:06<00:00, 2.98s/it] device 0: 100%|██████████| 2/2 [00:06<00:00, 3.10s/it] +2024-07-19 20:38:09 (INFO): Writing results to fine-tuning/results/2024-07-19-20-37-20-ft-oxides/ocp_predictions.npz +2024-07-19 20:38:28 (INFO): energy_mae: 4.87e+00, forcesx_mae: 1.81e-02, forcesy_mae: 1.51e-02, forcesz_mae: 2.08e-02, forces_mae: 1.80e-02, forces_cosine_similarity: -9.35e-03, forces_magnitude_error: 2.33e-02, energy_forces_within_threshold: 0.00e+00, loss: 4.91e+00, lr: 5.00e-04, epoch: 5.08e-01, step: 3.00e+01 +2024-07-19 20:38:30 (INFO): Evaluating on val. device 0: 0%| | 0/2 [00:00, ?it/s]/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() storage = elem.storage()._new_shared(numel) /opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() storage = elem.storage()._new_shared(numel) - device 0: 50%|█████ | 1/2 [00:04<00:04, 4.52s/it] device 0: 100%|██████████| 2/2 [00:07<00:00, 3.49s/it] device 0: 100%|██████████| 2/2 [00:07<00:00, 3.74s/it] -2024-07-14 21:56:05 (INFO): energy_mae: 4.3714, forcesx_mae: 0.0261, forcesy_mae: 0.0283, forcesz_mae: 0.0320, forces_mae: 0.0288, forces_cosine_similarity: -0.0227, forces_magnitude_error: 0.0402, energy_forces_within_threshold: 0.0000, loss: 4.4578, epoch: 0.5085 -2024-07-14 21:56:29 (INFO): energy_mae: 4.48e+00, forcesx_mae: 2.75e-02, forcesy_mae: 2.07e-02, forcesz_mae: 2.90e-02, forces_mae: 2.57e-02, forces_cosine_similarity: -1.02e-01, forces_magnitude_error: 3.27e-02, energy_forces_within_threshold: 0.00e+00, loss: 4.53e+00, lr: 5.00e-04, epoch: 6.78e-01, step: 4.00e+01 -2024-07-14 21:56:31 (INFO): Evaluating on val. + device 0: 50%|█████ | 1/2 [00:04<00:04, 4.59s/it] device 0: 100%|██████████| 2/2 [00:07<00:00, 3.63s/it] device 0: 100%|██████████| 2/2 [00:07<00:00, 3.86s/it] +2024-07-19 20:38:38 (INFO): energy_mae: 4.4664, forcesx_mae: 0.0269, forcesy_mae: 0.0290, forcesz_mae: 0.0331, forces_mae: 0.0296, forces_cosine_similarity: -0.0270, forces_magnitude_error: 0.0418, energy_forces_within_threshold: 0.0000, loss: 4.5564, epoch: 0.5085 +2024-07-19 20:39:03 (INFO): energy_mae: 3.78e+00, forcesx_mae: 3.13e-02, forcesy_mae: 2.69e-02, forcesz_mae: 3.30e-02, forces_mae: 3.04e-02, forces_cosine_similarity: -1.22e-01, forces_magnitude_error: 3.97e-02, energy_forces_within_threshold: 0.00e+00, loss: 3.84e+00, lr: 5.00e-04, epoch: 6.78e-01, step: 4.00e+01 +2024-07-19 20:39:04 (INFO): Evaluating on val. device 0: 0%| | 0/2 [00:00, ?it/s]/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. 
To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() storage = elem.storage()._new_shared(numel) /opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() storage = elem.storage()._new_shared(numel) - device 0: 50%|█████ | 1/2 [00:04<00:04, 4.23s/it] device 0: 100%|██████████| 2/2 [00:06<00:00, 3.36s/it] device 0: 100%|██████████| 2/2 [00:07<00:00, 3.57s/it] -2024-07-14 21:56:38 (INFO): energy_mae: 1.8611, forcesx_mae: 0.0151, forcesy_mae: 0.0166, forcesz_mae: 0.0108, forces_mae: 0.0142, forces_cosine_similarity: 0.0740, forces_magnitude_error: 0.0166, energy_forces_within_threshold: 0.0000, loss: 1.8639, epoch: 0.6780 -2024-07-14 21:56:38 (INFO): Predicting on test. + device 0: 50%|█████ | 1/2 [00:04<00:04, 4.72s/it] device 0: 100%|██████████| 2/2 [00:07<00:00, 3.67s/it] device 0: 100%|██████████| 2/2 [00:07<00:00, 3.93s/it] +2024-07-19 20:39:12 (INFO): energy_mae: 1.3617, forcesx_mae: 0.0160, forcesy_mae: 0.0197, forcesz_mae: 0.0159, forces_mae: 0.0172, forces_cosine_similarity: 0.1332, forces_magnitude_error: 0.0215, energy_forces_within_threshold: 0.0000, loss: 1.3892, epoch: 0.6780 +2024-07-19 20:39:13 (INFO): Predicting on test. device 0: 0%| | 0/2 [00:00, ?it/s]/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() storage = elem.storage()._new_shared(numel) /opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() storage = elem.storage()._new_shared(numel) - device 0: 50%|█████ | 1/2 [00:02<00:02, 2.97s/it] device 0: 100%|██████████| 2/2 [00:05<00:00, 2.87s/it] device 0: 100%|██████████| 2/2 [00:05<00:00, 2.96s/it] -2024-07-14 21:56:45 (INFO): Writing results to fine-tuning/results/2024-07-14-21-54-08-ft-oxides/ocp_predictions.npz -2024-07-14 21:57:05 (INFO): energy_mae: 6.16e+00, forcesx_mae: 4.45e-02, forcesy_mae: 4.71e-02, forcesz_mae: 4.13e-02, forces_mae: 4.43e-02, forces_cosine_similarity: -4.61e-02, forces_magnitude_error: 7.47e-02, energy_forces_within_threshold: 0.00e+00, loss: 6.26e+00, lr: 5.00e-04, epoch: 8.47e-01, step: 5.00e+01 -2024-07-14 21:57:07 (INFO): Evaluating on val. 
+ device 0: 50%|█████ | 1/2 [00:03<00:03, 3.36s/it] device 0: 100%|██████████| 2/2 [00:06<00:00, 3.06s/it] device 0: 100%|██████████| 2/2 [00:06<00:00, 3.20s/it] +2024-07-19 20:39:19 (INFO): Writing results to fine-tuning/results/2024-07-19-20-37-20-ft-oxides/ocp_predictions.npz +2024-07-19 20:39:40 (INFO): energy_mae: 5.64e+00, forcesx_mae: 2.19e-02, forcesy_mae: 2.01e-02, forcesz_mae: 1.89e-02, forces_mae: 2.03e-02, forces_cosine_similarity: -1.43e-02, forces_magnitude_error: 2.72e-02, energy_forces_within_threshold: 0.00e+00, loss: 5.68e+00, lr: 5.00e-04, epoch: 8.47e-01, step: 5.00e+01 +2024-07-19 20:39:42 (INFO): Evaluating on val. device 0: 0%| | 0/2 [00:00, ?it/s]/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() storage = elem.storage()._new_shared(numel) /opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() storage = elem.storage()._new_shared(numel) - device 0: 50%|█████ | 1/2 [00:04<00:04, 4.38s/it] device 0: 100%|██████████| 2/2 [00:07<00:00, 3.40s/it] device 0: 100%|██████████| 2/2 [00:07<00:00, 3.64s/it] -2024-07-14 21:57:14 (INFO): energy_mae: 4.2005, forcesx_mae: 0.0768, forcesy_mae: 0.0611, forcesz_mae: 0.0710, forces_mae: 0.0696, forces_cosine_similarity: -0.0503, forces_magnitude_error: 0.1233, energy_forces_within_threshold: 0.0000, loss: 4.3445, epoch: 0.8475 -2024-07-14 21:57:33 (INFO): Total time taken: 195.92912602424622 + device 0: 50%|█████ | 1/2 [00:04<00:04, 4.87s/it] device 0: 100%|██████████| 2/2 [00:07<00:00, 3.66s/it] device 0: 100%|██████████| 2/2 [00:07<00:00, 3.96s/it] +2024-07-19 20:39:50 (INFO): energy_mae: 4.2642, forcesx_mae: 0.0281, forcesy_mae: 0.0285, forcesz_mae: 0.0348, forces_mae: 0.0305, forces_cosine_similarity: -0.0390, forces_magnitude_error: 0.0459, energy_forces_within_threshold: 0.0000, loss: 4.2915, epoch: 0.8475 +2024-07-19 20:40:09 (INFO): Total time taken: 201.06344485282898 diff --git a/_downloads/819e10305ddd6839cd7da05935b17060/mass-inference.txt b/_downloads/819e10305ddd6839cd7da05935b17060/mass-inference.txt index 4bb66b3365..0dcc41e67e 100644 --- a/_downloads/819e10305ddd6839cd7da05935b17060/mass-inference.txt +++ b/_downloads/819e10305ddd6839cd7da05935b17060/mass-inference.txt @@ -1,16 +1,16 @@ -2024-07-14 21:59:31 (INFO): Running in non-distributed local mode -2024-07-14 21:59:31 (INFO): Project root: /home/runner/work/fairchem/fairchem/src/fairchem -2024-07-14 21:59:32 (INFO): amp: true +2024-07-19 20:42:08 (INFO): Running in non-distributed local mode +2024-07-19 20:42:08 (INFO): Project root: /home/runner/work/fairchem/fairchem/src/fairchem +2024-07-19 20:42:10 (INFO): amp: true cmd: - checkpoint_dir: ./checkpoints/2024-07-14-22-00-32 - commit: c2f8928 + checkpoint_dir: ./checkpoints/2024-07-19-20-41-36 + commit: f9ecf73 identifier: '' - logs_dir: ./logs/tensorboard/2024-07-14-22-00-32 + logs_dir: ./logs/tensorboard/2024-07-19-20-41-36 print_every: 10 - 
results_dir: ./results/2024-07-14-22-00-32 + results_dir: ./results/2024-07-19-20-41-36 seed: 0 - timestamp_id: 2024-07-14-22-00-32 - version: 0.1.dev1+gc2f8928 + timestamp_id: 2024-07-19-20-41-36 + version: 0.1.dev1+gf9ecf73 dataset: {} evaluation_metrics: metrics: @@ -113,20 +113,20 @@ test_dataset: trainer: ocp val_dataset: {} -2024-07-14 21:59:32 (INFO): rank: 0: Sampler created... -2024-07-14 21:59:32 (INFO): Batch balancing is disabled for single GPU training. -2024-07-14 21:59:32 (INFO): Loading model: gemnet_t -2024-07-14 21:59:34 (INFO): Loaded GemNetT with 31671825 parameters. -2024-07-14 21:59:34 (WARNING): log_summary for Tensorboard not supported -2024-07-14 21:59:34 (INFO): Loading checkpoint from: /tmp/ocp_checkpoints/gndt_oc22_all_s2ef.pt -2024-07-14 21:59:34 (INFO): Overwriting scaling factors with those loaded from checkpoint. If you're generating predictions with a pretrained checkpoint, this is the correct behavior. To disable this, delete `scale_dict` from the checkpoint. -2024-07-14 21:59:34 (WARNING): Scale factor comment not found in model -2024-07-14 21:59:34 (INFO): Predicting on test. +2024-07-19 20:42:10 (INFO): rank: 0: Sampler created... +2024-07-19 20:42:10 (INFO): Batch balancing is disabled for single GPU training. +2024-07-19 20:42:10 (INFO): Loading model: gemnet_t +2024-07-19 20:42:11 (INFO): Loaded GemNetT with 31671825 parameters. +2024-07-19 20:42:11 (WARNING): log_summary for Tensorboard not supported +2024-07-19 20:42:12 (INFO): Loading checkpoint from: /tmp/fairchem_checkpoints/gndt_oc22_all_s2ef.pt +2024-07-19 20:42:12 (INFO): Overwriting scaling factors with those loaded from checkpoint. If you're generating predictions with a pretrained checkpoint, this is the correct behavior. To disable this, delete `scale_dict` from the checkpoint. +2024-07-19 20:42:12 (WARNING): Scale factor comment not found in model +2024-07-19 20:42:12 (INFO): Predicting on test. device 0: 0%| | 0/3 [00:00, ?it/s]/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() storage = elem.storage()._new_shared(numel) /opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. 
To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() storage = elem.storage()._new_shared(numel) - device 0: 33%|███████████▋ | 1/3 [00:03<00:06, 3.13s/it] device 0: 67%|███████████████████████▎ | 2/3 [00:06<00:03, 3.02s/it] device 0: 100%|███████████████████████████████████| 3/3 [00:08<00:00, 2.97s/it] device 0: 100%|███████████████████████████████████| 3/3 [00:09<00:00, 3.01s/it] -2024-07-14 21:59:43 (INFO): Writing results to ./results/2024-07-14-22-00-32/ocp_predictions.npz -2024-07-14 21:59:43 (INFO): Total time taken: 9.158892154693604 -Elapsed time = 15.3 seconds + device 0: 33%|███████████▋ | 1/3 [00:03<00:07, 3.55s/it] device 0: 67%|███████████████████████▎ | 2/3 [00:06<00:03, 3.23s/it] device 0: 100%|███████████████████████████████████| 3/3 [00:09<00:00, 3.27s/it] device 0: 100%|███████████████████████████████████| 3/3 [00:09<00:00, 3.30s/it] +2024-07-19 20:42:22 (INFO): Writing results to ./results/2024-07-19-20-41-36/ocp_predictions.npz +2024-07-19 20:42:22 (INFO): Total time taken: 10.0305335521698 +Elapsed time = 16.3 seconds diff --git a/_images/033da45932f0ff46d77fde92685d4380a02364b0624159f737e66681d9f22946.png b/_images/033da45932f0ff46d77fde92685d4380a02364b0624159f737e66681d9f22946.png deleted file mode 100644 index 65e0798bd4..0000000000 Binary files a/_images/033da45932f0ff46d77fde92685d4380a02364b0624159f737e66681d9f22946.png and /dev/null differ diff --git a/_images/078ce4f4475733b0f35a252d810b61fb444427662bad332a2767171e51f40078.png b/_images/078ce4f4475733b0f35a252d810b61fb444427662bad332a2767171e51f40078.png new file mode 100644 index 0000000000..64871b53f4 Binary files /dev/null and b/_images/078ce4f4475733b0f35a252d810b61fb444427662bad332a2767171e51f40078.png differ diff --git a/_images/0c124c73213ff9d9f015ee8f28d0d833780ebf228603d12aec4868c103bad014.png b/_images/0c124c73213ff9d9f015ee8f28d0d833780ebf228603d12aec4868c103bad014.png deleted file mode 100644 index a8dfe5f6f7..0000000000 Binary files a/_images/0c124c73213ff9d9f015ee8f28d0d833780ebf228603d12aec4868c103bad014.png and /dev/null differ diff --git a/_images/1135cb8f3db5ab6bc6c5f4784263c92318c81d1a6bc4f29374b7067f81a3d30e.png b/_images/1135cb8f3db5ab6bc6c5f4784263c92318c81d1a6bc4f29374b7067f81a3d30e.png new file mode 100644 index 0000000000..e743c3a11c Binary files /dev/null and b/_images/1135cb8f3db5ab6bc6c5f4784263c92318c81d1a6bc4f29374b7067f81a3d30e.png differ diff --git a/_images/16129b0519e4aa0f26a0fa6773d805263f2df73cceb38069007ef8fe6efe8c1d.png b/_images/16129b0519e4aa0f26a0fa6773d805263f2df73cceb38069007ef8fe6efe8c1d.png new file mode 100644 index 0000000000..1179737aa9 Binary files /dev/null and b/_images/16129b0519e4aa0f26a0fa6773d805263f2df73cceb38069007ef8fe6efe8c1d.png differ diff --git a/_images/2afa38ee2ad4f3a3071f9f940aa53f830cf5c83ce9bf222f0930cf66f6b1622f.png b/_images/2afa38ee2ad4f3a3071f9f940aa53f830cf5c83ce9bf222f0930cf66f6b1622f.png deleted file mode 100644 index 9021084cac..0000000000 Binary files a/_images/2afa38ee2ad4f3a3071f9f940aa53f830cf5c83ce9bf222f0930cf66f6b1622f.png and /dev/null differ diff --git a/_images/32ce1e7593a154d5c27f44d27ff7148f9acf5b23c8408d13a143ee100d00fc99.png b/_images/32ce1e7593a154d5c27f44d27ff7148f9acf5b23c8408d13a143ee100d00fc99.png new file mode 100644 index 0000000000..d6152a5563 Binary files /dev/null and b/_images/32ce1e7593a154d5c27f44d27ff7148f9acf5b23c8408d13a143ee100d00fc99.png differ diff --git a/_images/4824685f0ad98d72e49a4607601630c480c1bb562b7de0372abf12f6a4814109.png 
b/_images/4824685f0ad98d72e49a4607601630c480c1bb562b7de0372abf12f6a4814109.png deleted file mode 100644 index 3b3431f2db..0000000000 Binary files a/_images/4824685f0ad98d72e49a4607601630c480c1bb562b7de0372abf12f6a4814109.png and /dev/null differ diff --git a/_images/4dbf118d1e1d535a138fae434b6a57e5f2414b589b67014e0d9778c425387312.png b/_images/4dbf118d1e1d535a138fae434b6a57e5f2414b589b67014e0d9778c425387312.png deleted file mode 100644 index 9d4b99d47b..0000000000 Binary files a/_images/4dbf118d1e1d535a138fae434b6a57e5f2414b589b67014e0d9778c425387312.png and /dev/null differ diff --git a/_images/5620ea27ffb3b5681339b3d9052c2011d8c61c24ab637877fcd2b45fb6e81c96.png b/_images/5620ea27ffb3b5681339b3d9052c2011d8c61c24ab637877fcd2b45fb6e81c96.png new file mode 100644 index 0000000000..524daf65d1 Binary files /dev/null and b/_images/5620ea27ffb3b5681339b3d9052c2011d8c61c24ab637877fcd2b45fb6e81c96.png differ diff --git a/_images/db683f9834a520f8261218b0cc097a56141eade11e7b9604c0754b69e071993f.png b/_images/5ea9123d4010ccf0797c15fe50c77eee0b4885930a80abf983790f5c16915dc3.png similarity index 70% rename from _images/db683f9834a520f8261218b0cc097a56141eade11e7b9604c0754b69e071993f.png rename to _images/5ea9123d4010ccf0797c15fe50c77eee0b4885930a80abf983790f5c16915dc3.png index b66cab6ddc..93e0dfb427 100644 Binary files a/_images/db683f9834a520f8261218b0cc097a56141eade11e7b9604c0754b69e071993f.png and b/_images/5ea9123d4010ccf0797c15fe50c77eee0b4885930a80abf983790f5c16915dc3.png differ diff --git a/_images/6035babb14db7ac4d78cc851f2fcbbf73220a9d65bd43c650466cc72ca030d9d.png b/_images/6035babb14db7ac4d78cc851f2fcbbf73220a9d65bd43c650466cc72ca030d9d.png deleted file mode 100644 index cce4f2dc27..0000000000 Binary files a/_images/6035babb14db7ac4d78cc851f2fcbbf73220a9d65bd43c650466cc72ca030d9d.png and /dev/null differ diff --git a/_images/aee96fc3ceee5280ecb2704288b1ef4902bbe3b113f789e61756f34ec2983c3e.png b/_images/aee96fc3ceee5280ecb2704288b1ef4902bbe3b113f789e61756f34ec2983c3e.png new file mode 100644 index 0000000000..cd1657c009 Binary files /dev/null and b/_images/aee96fc3ceee5280ecb2704288b1ef4902bbe3b113f789e61756f34ec2983c3e.png differ diff --git a/_images/be62301e07594b3378c727532f052de9b51cd1de9c835cc644571ba8714ac17c.png b/_images/be62301e07594b3378c727532f052de9b51cd1de9c835cc644571ba8714ac17c.png deleted file mode 100644 index eb75b61783..0000000000 Binary files a/_images/be62301e07594b3378c727532f052de9b51cd1de9c835cc644571ba8714ac17c.png and /dev/null differ diff --git a/_images/f508405455676bf2a7ab2b64f3133d6a31c7e2bed12b4e68b1bae30fb759e03b.png b/_images/f508405455676bf2a7ab2b64f3133d6a31c7e2bed12b4e68b1bae30fb759e03b.png new file mode 100644 index 0000000000..a2d2e8cba0 Binary files /dev/null and b/_images/f508405455676bf2a7ab2b64f3133d6a31c7e2bed12b4e68b1bae30fb759e03b.png differ diff --git a/_sources/core/fine-tuning/fine-tuning-oxides.md b/_sources/core/fine-tuning/fine-tuning-oxides.md index d01b8fcbdc..77a9350d3b 100644 --- a/_sources/core/fine-tuning/fine-tuning-oxides.md +++ b/_sources/core/fine-tuning/fine-tuning-oxides.md @@ -29,7 +29,7 @@ We get this checkpoint here. ```{code-cell} ipython3 from fairchem.core.models.model_registry import model_name_to_local_file -checkpoint_path = model_name_to_local_file('GemNet-OC-S2EFS-OC20+OC22', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('GemNet-OC-S2EFS-OC20+OC22', local_cache='/tmp/fairchem_checkpoints/') ``` The data we need is provided in `supporting-information.json`. 
That file is embedded in the supporting information for the article, and is provided here in the tutorial. We load this data and explore it a little. The json file provides a dictionary with the structure: diff --git a/_sources/core/gotchas.md b/_sources/core/gotchas.md index f548bb9cea..d0dfac5b12 100644 --- a/_sources/core/gotchas.md +++ b/_sources/core/gotchas.md @@ -45,7 +45,7 @@ The problem here is that no neighbors are found for the single atom which causes ```{code-cell} ipython3 from fairchem.core.common.relaxation.ase_utils import OCPCalculator from fairchem.core.models.model_registry import model_name_to_local_file -checkpoint_path = model_name_to_local_file('GemNet-OC-S2EFS-OC20+OC22', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('GemNet-OC-S2EFS-OC20+OC22', local_cache='/tmp/fairchem_checkpoints/') calc = OCPCalculator(checkpoint_path=checkpoint_path) ``` @@ -79,7 +79,7 @@ add_adsorbate(slab, 'O', height=1.2, position='fcc') from fairchem.core.models.model_registry import model_name_to_local_file # OC20 model - trained on adsorption energies -checkpoint_path = model_name_to_local_file('GemNet-OC-S2EF-OC20-All', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('GemNet-OC-S2EF-OC20-All', local_cache='/tmp/fairchem_checkpoints/') with contextlib.redirect_stdout(StringIO()) as _: calc = OCPCalculator(checkpoint_path=checkpoint_path, cpu=False) @@ -92,7 +92,7 @@ slab.get_potential_energy() ```{code-cell} ipython3 # An OC22 checkpoint - trained on total energy -checkpoint_path = model_name_to_local_file('GemNet-OC-S2EFS-OC20+OC22', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('GemNet-OC-S2EFS-OC20+OC22', local_cache='/tmp/fairchem_checkpoints/') with contextlib.redirect_stdout(StringIO()) as _: calc = OCPCalculator(checkpoint_path=checkpoint_path, cpu=False) @@ -105,7 +105,7 @@ slab.get_potential_energy() ```{code-cell} ipython3 # This eSCN model is trained on adsorption energies -checkpoint_path = model_name_to_local_file('eSCN-L4-M2-Lay12-S2EF-OC20-2M', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('eSCN-L4-M2-Lay12-S2EF-OC20-2M', local_cache='/tmp/fairchem_checkpoints/') with contextlib.redirect_stdout(StringIO()) as _: calc = OCPCalculator(checkpoint_path=checkpoint_path, cpu=False) @@ -158,7 +158,7 @@ from fairchem.core.common.relaxation.ase_utils import OCPCalculator from fairchem.core.models.model_registry import model_name_to_local_file import os -checkpoint_path = model_name_to_local_file('GemNet-OC-S2EFS-OC20+OC22', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('GemNet-OC-S2EFS-OC20+OC22', local_cache='/tmp/fairchem_checkpoints/') calc = OCPCalculator(checkpoint_path=checkpoint_path) ``` @@ -184,7 +184,7 @@ from fairchem.core.common.relaxation.ase_utils import OCPCalculator from fairchem.core.models.model_registry import model_name_to_local_file import os -checkpoint_path = model_name_to_local_file('GemNet-OC-S2EFS-OC20+OC22', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('GemNet-OC-S2EFS-OC20+OC22', local_cache='/tmp/fairchem_checkpoints/') calc = OCPCalculator(checkpoint_path=checkpoint_path) ``` @@ -209,7 +209,7 @@ from fairchem.core.common.relaxation.ase_utils import OCPCalculator from fairchem.core.models.model_registry import model_name_to_local_file import os -checkpoint_path = model_name_to_local_file('EquiformerV2-31M-S2EF-OC20-All+MD', 
local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('EquiformerV2-31M-S2EF-OC20-All+MD', local_cache='/tmp/fairchem_checkpoints/') calc = OCPCalculator(checkpoint_path=checkpoint_path) ``` @@ -231,7 +231,7 @@ This happens because a random selection of is made to sample edges, and a differ from fairchem.core.models.model_registry import model_name_to_local_file from fairchem.core.common.relaxation.ase_utils import OCPCalculator -checkpoint_path = model_name_to_local_file('EquiformerV2-31M-S2EF-OC20-All+MD', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('EquiformerV2-31M-S2EF-OC20-All+MD', local_cache='/tmp/fairchem_checkpoints/') calc = OCPCalculator(checkpoint_path=checkpoint_path, cpu=True) from ase.build import fcc111, add_adsorbate @@ -258,7 +258,7 @@ In DFT, the forces on all the atoms should sum to zero; otherwise, there is a ne ```{code-cell} ipython3 from fairchem.core.models.model_registry import model_name_to_local_file -checkpoint_path = model_name_to_local_file('EquiformerV2-31M-S2EF-OC20-All+MD', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('EquiformerV2-31M-S2EF-OC20-All+MD', local_cache='/tmp/fairchem_checkpoints/') from fairchem.core.common.relaxation.ase_utils import OCPCalculator calc = OCPCalculator(checkpoint_path=checkpoint_path, cpu=True) diff --git a/_sources/core/inference.md b/_sources/core/inference.md index fc717d71c7..d10dc6fed2 100644 --- a/_sources/core/inference.md +++ b/_sources/core/inference.md @@ -70,7 +70,7 @@ print(available_pretrained_models) ```{code-cell} ipython3 from fairchem.core.models.model_registry import model_name_to_local_file -checkpoint_path = model_name_to_local_file('GemNet-dT-S2EFS-OC22', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('GemNet-dT-S2EFS-OC22', local_cache='/tmp/fairchem_checkpoints/') checkpoint_path ``` diff --git a/_sources/core/papers_using_models.md b/_sources/core/papers_using_models.md index a59b7589f6..a1040199af 100644 --- a/_sources/core/papers_using_models.md +++ b/_sources/core/papers_using_models.md @@ -1,5 +1,47 @@ -# Studies that have leveraged OCP models +# Studies leveraging pre-trained or fine-tuned FAIR-Chem models + +Many papers have now used FAIR-Chem models to accelerate screening and discovery efforts and enable new computational chemistry simulations. We highlight some here just to give an idea of the breadth of possibilities and how they have been used. Feel free to reach out (or submit PRs) if you want them included! + +## Accelerating computational catalysis + +```{bibliography} +:style: plain +:filter: category == "accelerating catalysis" +``` + +## Transfer/fine-tuning strategies for FAIR-Chem pre-trained checkpoints + +```{bibliography} +:style: plain +:filter: category == "transfer strategies" +``` + +## Transfer/fine-tuning applications for FAIR-Chem pre-trained checkpoints + +```{bibliography} +:style: plain +:filter: category == "transfer applications" +``` + +## Catalyst discovery or optimization + +```{bibliography} +:style: plain +:filter: category == "catalyst discovery" +``` + +## Uncertainty quantification + +```{bibliography} +:style: plain +:filter: category == "uncertainty" +``` + +## Properties beyond energies and forces + +```{bibliography} +:style: plain +:filter: category == "other properties" +``` -Many papers have now used the latest OCP models to accelerate screening and discovery efforts and enable new computational chemistry simulations! 
-We highlight some here just to give an idea of the breadth of possibilities and how they have been used. Feel free to reach out (or submit PRs with links to your papers if you want them included)! diff --git a/_sources/core/quickstart.md b/_sources/core/quickstart.md index 7396dfb4d9..b2788d011f 100644 --- a/_sources/core/quickstart.md +++ b/_sources/core/quickstart.md @@ -26,7 +26,7 @@ print(available_pretrained_models) ```{code-cell} ipython3 from fairchem.core.models.model_registry import model_name_to_local_file -checkpoint_path = model_name_to_local_file('GemNet-OC-S2EFS-OC20+OC22', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('GemNet-OC-S2EFS-OC20+OC22', local_cache='/tmp/fairchem_checkpoints/') checkpoint_path ``` diff --git a/_sources/legacy_tutorials/OCP_Tutorial.md b/_sources/legacy_tutorials/OCP_Tutorial.md index b05cef74b1..8b5d4d522a 100644 --- a/_sources/legacy_tutorials/OCP_Tutorial.md +++ b/_sources/legacy_tutorials/OCP_Tutorial.md @@ -1388,7 +1388,7 @@ relax_dataset = "data/is2re/val_20/data.lmdb" from fairchem.core.models.model_registry import model_name_to_local_file -checkpoint_path = model_name_to_local_file('GemNet-dT-S2EF-OC20-All', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('GemNet-dT-S2EF-OC20-All', local_cache='/tmp/fairchem_checkpoints/') ``` @@ -2021,7 +2021,7 @@ For this tutorial we download one of our earlier model checkpoints: GemNet-T from fairchem.core.models.model_registry import model_name_to_local_file -checkpoint_path = model_name_to_local_file('GemNet-dT-S2EF-OC20-All', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('GemNet-dT-S2EF-OC20-All', local_cache='/tmp/fairchem_checkpoints/') ``` diff --git a/_sources/tutorials/NRR/NRR_example.md b/_sources/tutorials/NRR/NRR_example.md index dfbdae3942..b69e078d1a 100644 --- a/_sources/tutorials/NRR/NRR_example.md +++ b/_sources/tutorials/NRR/NRR_example.md @@ -34,12 +34,17 @@ import os from glob import glob import pandas as pd from fairchem.data.oc.utils import DetectTrajAnomaly + +# Set random seed to ensure adsorbate enumeration yields a valid candidate +# If using a larger number of random samples this wouldn't be necessary +import numpy as np +np.random.seed(22) ``` ```{code-cell} ipython3 from fairchem.core.models.model_registry import model_name_to_local_file -checkpoint_path = model_name_to_local_file('EquiformerV2-31M-S2EF-OC20-All+MD', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('EquiformerV2-31M-S2EF-OC20-All+MD', local_cache='/tmp/fairchem_checkpoints/') checkpoint_path ``` diff --git a/_sources/tutorials/OCP-introduction.md b/_sources/tutorials/OCP-introduction.md index 4bf827faca..d426021a07 100644 --- a/_sources/tutorials/OCP-introduction.md +++ b/_sources/tutorials/OCP-introduction.md @@ -60,7 +60,7 @@ The different models have different compute requirements. If you find your kerne ```{code-cell} from fairchem.core.models.model_registry import model_name_to_local_file -checkpoint_path = model_name_to_local_file('EquiformerV2-31M-S2EF-OC20-All+MD', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('EquiformerV2-31M-S2EF-OC20-All+MD', local_cache='/tmp/fairchem_checkpoints/') ``` Next we load the checkpoint. The output is somewhat verbose, but it can be informative for debugging purposes. 
diff --git a/_sources/tutorials/adsorbml_walkthrough.md b/_sources/tutorials/adsorbml_walkthrough.md index 262ac3fca5..7d6beeda1d 100644 --- a/_sources/tutorials/adsorbml_walkthrough.md +++ b/_sources/tutorials/adsorbml_walkthrough.md @@ -75,7 +75,7 @@ from fairchem.core.common.relaxation.ase_utils import OCPCalculator from fairchem.core.models.model_registry import model_name_to_local_file import os -checkpoint_path = model_name_to_local_file('EquiformerV2-31M-S2EF-OC20-All+MD', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('EquiformerV2-31M-S2EF-OC20-All+MD', local_cache='/tmp/fairchem_checkpoints/') os.makedirs(f"data/{bulk}_{adsorbate}", exist_ok=True) diff --git a/_sources/tutorials/advanced/embeddings.md b/_sources/tutorials/advanced/embeddings.md index 04a605ae18..ec1a25c12c 100644 --- a/_sources/tutorials/advanced/embeddings.md +++ b/_sources/tutorials/advanced/embeddings.md @@ -58,7 +58,7 @@ from fairchem.core.common.relaxation.ase_utils import OCPCalculator from fairchem.core.models.model_registry import model_name_to_local_file import os -checkpoint_path = model_name_to_local_file('GemNet-OC-S2EFS-OC20+OC22', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('GemNet-OC-S2EFS-OC20+OC22', local_cache='/tmp/fairchem_checkpoints/') calc = OCPCalculator(checkpoint_path=checkpoint_path) ``` diff --git a/_sources/tutorials/advanced/fine-tuning-in-python.md b/_sources/tutorials/advanced/fine-tuning-in-python.md index 568d92723b..1d14219c88 100644 --- a/_sources/tutorials/advanced/fine-tuning-in-python.md +++ b/_sources/tutorials/advanced/fine-tuning-in-python.md @@ -51,7 +51,7 @@ root.addHandler(handler_err) ```{code-cell} ipython3 from fairchem.core.models.model_registry import model_name_to_local_file -checkpoint_path = model_name_to_local_file('GemNet-OC-S2EFS-OC20+OC22', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('GemNet-OC-S2EFS-OC20+OC22', local_cache='/tmp/fairchem_checkpoints/') from fairchem.core.common.relaxation.ase_utils import OCPCalculator calc = OCPCalculator(checkpoint_path=checkpoint_path, trainer='forces', cpu=False) ``` diff --git a/_sources/tutorials/cattsunami_walkthrough.md b/_sources/tutorials/cattsunami_tutorial.md similarity index 76% rename from _sources/tutorials/cattsunami_walkthrough.md rename to _sources/tutorials/cattsunami_tutorial.md index 025a93e63c..d5f397b828 100644 --- a/_sources/tutorials/cattsunami_walkthrough.md +++ b/_sources/tutorials/cattsunami_tutorial.md @@ -4,14 +4,14 @@ jupytext: extension: .md format_name: myst format_version: 0.13 - jupytext_version: 1.16.1 + jupytext_version: 1.16.3 kernelspec: display_name: Python 3 (ipykernel) language: python name: python3 --- -# CatTSunami tutorial +# CatTSunami Tutorial ```{code-cell} ipython3 --- @@ -31,20 +31,17 @@ import matplotlib.pyplot as plt from fairchem.applications.cattsunami.core.autoframe import AutoFrameDissociation from fairchem.applications.cattsunami.core import OCPNEB from ase.io import read -from IPython.display import Image -# Optional -# from x3dase.x3d import X3D +#Optional +from IPython.display import Image +from x3dase.x3d import X3D -# Set random seed +#Set random seed import numpy as np np.random.seed(22) ``` -## Do enumerations in an AdsorbML style for CH dissociation on Ru (001) - -To start, we generate placements for the reactant and product species on the surface. 
We utilize the random placement approach which was developed for AdsorbML, and use an OCP model to relax our placements on the surface. These placements and their ML-determined energies are used as input to the CatTSunami automatic NEB frame generation approach. - +## Do enumerations in an AdsorbML style ```{code-cell} ipython3 --- @@ -54,16 +51,31 @@ tags: ["skip-execution"] reaction = Reaction(reaction_str_from_db="*CH -> *C + *H", reaction_db_path=DISSOCIATION_REACTION_DB_PATH, adsorbate_db_path = ADSORBATE_PKL_PATH) +``` +```{code-cell} ipython3 +--- +tags: ["skip-execution"] +--- # Instantiate our adsorbate class for the reactant and product reactant = Adsorbate(adsorbate_id_from_db=reaction.reactant1_idx, adsorbate_db_path=ADSORBATE_PKL_PATH) product1 = Adsorbate(adsorbate_id_from_db=reaction.product1_idx, adsorbate_db_path=ADSORBATE_PKL_PATH) product2 = Adsorbate(adsorbate_id_from_db=reaction.product2_idx, adsorbate_db_path=ADSORBATE_PKL_PATH) +``` +```{code-cell} ipython3 +--- +tags: ["skip-execution"] +--- # Grab the bulk and cut the slab we are interested in bulk = Bulk(bulk_src_id_from_db="mp-33", bulk_db_path=BULK_PKL_PATH) slab = Slab.from_bulk_get_specific_millers(bulk = bulk, specific_millers=(0,0,1)) +``` +```{code-cell} ipython3 +--- +tags: ["skip-execution"] +--- # Perform site enumeration # For AdsorbML num_sites = 100, but we use 5 here for brevity. This should be increased for practical use. reactant_configs = AdsorbateSlabConfig(slab = slab[0], adsorbate = reactant, @@ -84,21 +96,11 @@ tags: ["skip-execution"] # Instantiate the calculator # NOTE: If you have a GPU, use cpu = False # NOTE: Change the checkpoint path to locally downloaded files as needed -checkpoint_path = model_name_to_local_file('EquiformerV2-31M-S2EF-OC20-All+MD', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('EquiformerV2-31M-S2EF-OC20-All+MD', local_cache='/tmp/fairchem_checkpoints/') cpu = True calc = OCPCalculator(checkpoint_path = checkpoint_path, cpu = cpu) ``` -### Run ML local relaxations: - -There are 2 options for how to do this. - 1. Using `OCPCalculator` as the calculator within the ASE framework - 2. By writing objects to lmdb and relaxing them using `main.py` in the ocp repo - -(1) is really only adequate for small stuff and it is what I will show here, but if you plan to run many relaxations, you should definitely use (2). More details about writing lmdbs has been provided [here](https://github.com/Open-Catalyst-Project/ocp/blob/main/tutorials/lmdb_dataset_creation.ipynb) - follow the IS2RS/IS2RE instructions. And more information about running relaxations once the lmdb has been written is [here](https://github.com/Open-Catalyst-Project/ocp/blob/main/TRAIN.md#initial-structure-to-relaxed-structure-is2rs). - -You need to provide the calculator with a path to a model checkpoint file. 
That can be downloaded [here](../core/model_checkpoints) - ```{code-cell} ipython3 --- tags: ["skip-execution"] @@ -110,7 +112,12 @@ for config in reactant_configs: opt = BFGS(config) opt.run(fmax = 0.05, steps=200) reactant_energies.append(config.get_potential_energy()) +``` +```{code-cell} ipython3 +--- +tags: ["skip-execution"] +--- # Relax the product systems product1_energies = [] for config in product1_configs: @@ -118,7 +125,12 @@ for config in product1_configs: opt = BFGS(config) opt.run(fmax = 0.05, steps=200) product1_energies.append(config.get_potential_energy()) +``` +```{code-cell} ipython3 +--- +tags: ["skip-execution"] +--- product2_energies = [] for config in product2_configs: config.calc = calc @@ -128,8 +140,13 @@ for config in product2_configs: ``` ## Enumerate NEBs -Here we use the class we created to handle automatic generation of NEB frames to create frames using the structures we just relaxed as input. - + +```{code-cell} ipython3 +--- +tags: ["skip-execution"] +--- +Image(filename="dissociation_scheme.png") +``` ```{code-cell} ipython3 --- @@ -146,7 +163,12 @@ af = AutoFrameDissociation( r_product2_max=3, #r3 in the above fig r_product2_min=1, #r2 in the above fig ) +``` +```{code-cell} ipython3 +--- +tags: ["skip-execution"] +--- nframes = 10 frame_sets, mapping_idxs = af.get_neb_frames(calc, n_frames = nframes, @@ -156,7 +178,6 @@ frame_sets, mapping_idxs = af.get_neb_frames(calc, ``` ## Run NEBs -Here we use the custom child class we created to run NEB relaxations using ML. The class we created allows the frame relaxations to be batched, improving efficiency. ```{code-cell} ipython3 --- @@ -189,7 +210,7 @@ tags: ["skip-execution"] # conv = optimizer.run(fmax=fmax, steps=300) # if conv: # converged_idxs.append(idx) - + # print(converged_idxs) ``` @@ -217,14 +238,13 @@ if conv: conv = optimizer.run(fmax=fmax, steps=300) ``` -## (Optional) Visualize the results +## Visualize the results ```{code-cell} ipython3 --- tags: ["skip-execution"] --- -idx_of_interest = 0 -optimized_neb = read(f"n2_dissoc_on_Ru_{idx_of_interest}.traj", ":")[-1*nframes:] +optimized_neb = read(f"ch_dissoc_on_Ru_{converged_idxs[0]}.traj", ":")[-1*nframes:] ``` ```{code-cell} ipython3 @@ -235,7 +255,12 @@ es = [] for frame in optimized_neb: frame.set_calculator(calc) es.append(frame.get_potential_energy()) +``` +```{code-cell} ipython3 +--- +tags: ["skip-execution"] +--- # Plot the reaction coordinate es = [e - es[0] for e in es] diff --git a/_sources/tutorials/fairchem_models_for_nebs.md b/_sources/tutorials/fairchem_models_for_nebs.md new file mode 100644 index 0000000000..b2723288d1 --- /dev/null +++ b/_sources/tutorials/fairchem_models_for_nebs.md @@ -0,0 +1,174 @@ +--- +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.16.3 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +# Tutorial for using Fair Chemistry models to relax NEBs + +```{code-cell} ipython3 +--- +tags: ["skip-execution"] +--- +from ase.optimize import BFGS +from ase.io import read + +from fairchem.applications.cattsunami.core.autoframe import interpolate +from fairchem.applications.cattsunami.core import OCPNEB +from fairchem.core.models.model_registry import model_name_to_local_file + +#Optional +from x3dase.x3d import X3D +import matplotlib.pyplot as plt +import os +``` + +## Set up inputs + +Shown here are the values used consistently throughout the paper. 
+ +```{code-cell} ipython3 +--- +tags: ["skip-execution"] +--- +fmax = 0.05 # [eV / ang] +delta_fmax_climb = 0.4 # this means that when the fmax is below 0.45 eV/Ang climbing image will be turned on +k = 1 # you may adjust this value as you see fit +cpu = True # set to False if you have a GPU + + +# NOTE: Change the checkpoint path to locally downloaded files as needed +checkpoint_path = model_name_to_local_file('EquiformerV2-31M-S2EF-OC20-All+MD', local_cache='/tmp/fairchem_checkpoints/') +``` + +## If you have your own set of NEB frames + +```{code-cell} ipython3 +--- +tags: ["skip-execution"] +--- +""" +Load your frames (change to the appropriate loading method) +The approach uses ase, so you must provide a list of ase.Atoms objects +with the appropriate constraints. +""" +cwd = os.getcwd() +path_ = os.path.abspath(os.path.join(cwd, os.pardir, os.pardir)) +path_ = os.path.join(path_, "src", "fairchem", "applications", "cattsunami", "tutorial", "sample_traj.traj") +frame_set = read(path_, ":")[0:10] # Change to the path to your atoms of the frame set +``` + +```{code-cell} ipython3 +--- +tags: ["skip-execution"] +--- +neb = OCPNEB( + frame_set, + checkpoint_path=checkpoint_path, + k=k, + batch_size=8, # If you get a memory error, try reducing this to 4 + cpu = cpu, +) +optimizer = BFGS( + neb, + trajectory=f"your-neb.traj", +) +conv = optimizer.run(fmax=fmax + delta_fmax_climb, steps=200) +if conv: + neb.climb = True + conv = optimizer.run(fmax=fmax, steps=300) +``` + +## If you have a proposed initial and final frame + +You may use the `interpolate` function we implemented which is very similar to idpp but not sensitive to periodic boundary crossings. Alternatively you can adopt whatever interpolation scheme you prefer. The `interpolate` function lacks some of the extra protections implemented in the `interpolate_and_correct_frames` function, which is used in the CatTSunami enumeration workflow. Care should be taken to ensure the results are reasonable. + +IMPORTANT NOTES: +1. Make sure the indices in the initial and final frame map to the same atoms +2. Ensure you have the proper constraints on subsurface atoms + +```{code-cell} ipython3 +--- +tags: ["skip-execution"] +--- +""" +Load your initial and final frames (change to the appropriate loading method) +The approach uses ase, so you must provide ase.Atoms objects +with the appropriate constraints (i.e. fixed subsurface atoms).
+""" +initial_frame = read("path-to-your-initial-atoms.traj") +final_frame = read("path-to-your-final-atoms.traj") +num_frames = 10 # you may change this to whatever you like +``` + +```{code-cell} ipython3 +--- +tags: ["skip-execution"] +--- +frame_set = interpolate(initial_frame, final_frame, num_frames) + +neb = OCPNEB( + frame_set, + checkpoint_path=checkpoint_path, + k=k, + batch_size=8, # If you get a memory error, try reducing this to 4 + cpu = cpu, +) +optimizer = BFGS( + neb, + trajectory=f"your-neb.traj", +) +conv = optimizer.run(fmax=fmax + delta_fmax_climb, steps=200) +if conv: + neb.climb = True + conv = optimizer.run(fmax=fmax, steps=300) +``` + +## Visualize the results + +```{code-cell} ipython3 +--- +tags: ["skip-execution"] +--- +optimized_neb = read(f"your-neb.traj", ":")[-1*num_frames:] +``` + +```{code-cell} ipython3 +--- +tags: ["skip-execution"] +--- +es = [] +for frame in optimized_neb: + frame.set_calculator(calc) + es.append(frame.get_potential_energy()) +``` + +```{code-cell} ipython3 +--- +tags: ["skip-execution"] +--- +# Plot the reaction coordinate + +es = [e - es[0] for e in es] +plt.plot(es) +plt.xlabel("frame number") +plt.ylabel("relative energy [eV]") +plt.title(f"Ea = {max(es):1.2f} eV") +plt.savefig("reaction_coordinate.png") +``` + +```{code-cell} ipython3 +--- +tags: ["skip-execution"] +--- +# Make an interactive html file of the optimized neb trajectory +x3d = X3D(optimized_neb) +x3d.write("your-neb.html") +``` diff --git a/_sources/tutorials/intro.md index 51799a4d81..4be41ae745 100644 --- a/_sources/tutorials/intro.md +++ b/_sources/tutorials/intro.md @@ -68,7 +68,7 @@ You can get a checkpoint file with one of the keys listed above like this. The r ```{code-cell} ipython3 from fairchem.core.models.model_registry import model_name_to_local_file -checkpoint_path = model_name_to_local_file('GemNet-OC-S2EFS-OC20+OC22', local_cache='/tmp/ocp_checkpoints/') +checkpoint_path = model_name_to_local_file('GemNet-OC-S2EFS-OC20+OC22', local_cache='/tmp/fairchem_checkpoints/') checkpoint_path ``` diff --git a/_static/searchtools.js index 92da3f8b22..b08d58c9b9 100644 --- a/_static/searchtools.js +++ b/_static/searchtools.js @@ -178,7 +178,7 @@ const Search = { htmlToText: (htmlString, anchor) => { const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); - for (const removalQuery of [".headerlinks", "script", "style"]) { + for (const removalQuery of [".headerlink", "script", "style"]) { htmlElement.querySelectorAll(removalQuery).forEach((el) => { el.remove() }); } if (anchor) { @@ -328,13 +328,14 @@ const Search = { for (const [title, foundTitles] of Object.entries(allTitles)) { if (title.toLowerCase().trim().includes(queryLower) && (queryLower.length >= title.length/2)) { for (const [file, id] of foundTitles) { - let score = Math.round(100 * queryLower.length / title.length) + const score = Math.round(Scorer.title * queryLower.length / title.length); + const boost = titles[file] === title ? 1 : 0; // add a boost for document titles normalResults.push([ docNames[file], titles[file] !== title ? `${titles[file]} > ${title}` : title, id !== null ?
"#" + id : "", null, - score, + score + boost, filenames[file], ]); } diff --git a/autoapi/adsorbml/2023_neurips_challenge/challenge_eval/index.html b/autoapi/adsorbml/2023_neurips_challenge/challenge_eval/index.html index a3a52befd8..271409b267 100644 --- a/autoapi/adsorbml/2023_neurips_challenge/challenge_eval/index.html +++ b/autoapi/adsorbml/2023_neurips_challenge/challenge_eval/index.html @@ -194,7 +194,7 @@
OCP API & Demo