diff --git a/.buildinfo b/.buildinfo
index fddc69866e..926c8c9789 100644
--- a/.buildinfo
+++ b/.buildinfo
@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: b5d0ff58eedd87b93894b95d734e1b60
+config: ac9e52e0f6ce67401b12e91af06590f6
 tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/_downloads/5fdddbed2260616231dbf7b0d94bb665/train.txt b/_downloads/5fdddbed2260616231dbf7b0d94bb665/train.txt
deleted file mode 100644
index 72f40e298e..0000000000
--- a/_downloads/5fdddbed2260616231dbf7b0d94bb665/train.txt
+++ /dev/null
@@ -1,224 +0,0 @@
-2024-05-17 16:41:34 (INFO): Project root: /home/runner/work/fairchem/fairchem/src/fairchem
-2024-05-17 16:41:35 (WARNING): Detected old config, converting to new format. Consider updating to avoid potential incompatibilities.
-2024-05-17 16:41:35 (INFO): amp: true
-cmd:
-  checkpoint_dir: fine-tuning/checkpoints/2024-05-17-16-40-32-ft-oxides
-  commit: 59cf718
-  identifier: ft-oxides
-  logs_dir: fine-tuning/logs/tensorboard/2024-05-17-16-40-32-ft-oxides
-  print_every: 10
-  results_dir: fine-tuning/results/2024-05-17-16-40-32-ft-oxides
-  seed: 0
-  timestamp_id: 2024-05-17-16-40-32-ft-oxides
-  version: 0.1.dev1+g59cf718
-dataset:
-  a2g_args:
-    r_energy: true
-    r_forces: true
-  format: ase_db
-  key_mapping:
-    force: forces
-    y: energy
-  src: train.db
-eval_metrics:
-  metrics:
-    energy:
-    - mae
-    forces:
-    - forcesx_mae
-    - forcesy_mae
-    - forcesz_mae
-    - mae
-    - cosine_similarity
-    - magnitude_error
-    misc:
-    - energy_forces_within_threshold
-  primary_metric: forces_mae
-gpus: 0
-logger: tensorboard
-loss_fns:
-- energy:
-    coefficient: 1
-    fn: mae
-- forces:
-    coefficient: 1
-    fn: l2mae
-model: gemnet_oc
-model_attributes:
-  activation: silu
-  atom_edge_interaction: true
-  atom_interaction: true
-  cbf:
-    name: spherical_harmonics
-  cutoff: 12.0
-  cutoff_aeaint: 12.0
-  cutoff_aint: 12.0
-  cutoff_qint: 12.0
-  direct_forces: true
-  edge_atom_interaction: true
-  emb_size_aint_in: 64
-  emb_size_aint_out: 64
-  emb_size_atom: 256
-  emb_size_cbf: 16
-  emb_size_edge: 512
-  emb_size_quad_in: 32
-  emb_size_quad_out: 32
-  emb_size_rbf: 16
-  emb_size_sbf: 32
-  emb_size_trip_in: 64
-  emb_size_trip_out: 64
-  envelope:
-    exponent: 5
-    name: polynomial
-  extensive: true
-  forces_coupled: false
-  max_neighbors: 30
-  max_neighbors_aeaint: 20
-  max_neighbors_aint: 1000
-  max_neighbors_qint: 8
-  num_after_skip: 2
-  num_atom: 3
-  num_atom_emb_layers: 2
-  num_before_skip: 2
-  num_blocks: 4
-  num_concat: 1
-  num_global_out_layers: 2
-  num_output_afteratom: 3
-  num_radial: 128
-  num_spherical: 7
-  otf_graph: true
-  output_init: HeOrthogonal
-  qint_tags:
-  - 1
-  - 2
-  quad_interaction: true
-  rbf:
-    name: gaussian
-  regress_forces: true
-  sbf:
-    name: legendre_outer
-  symmetric_edge_symmetrization: false
-noddp: false
-optim:
-  batch_size: 4
-  clip_grad_norm: 10
-  ema_decay: 0.999
-  energy_coefficient: 1
-  eval_batch_size: 16
-  eval_every: 10
-  factor: 0.8
-  force_coefficient: 1
-  load_balancing: atoms
-  loss_energy: mae
-  lr_initial: 0.0005
-  max_epochs: 1
-  mode: min
-  num_workers: 2
-  optimizer: AdamW
-  optimizer_params:
-    amsgrad: true
-  patience: 3
-  scheduler: ReduceLROnPlateau
-  weight_decay: 0
-outputs:
-  energy:
-    level: system
-  forces:
-    eval_on_free_atoms: true
-    level: atom
-    train_on_free_atoms: false
-slurm: {}
-task:
-  dataset: ase_db
-test_dataset:
-  a2g_args:
-    r_energy: false
-    r_forces: false
-  src: test.db
-trainer: ocp
-val_dataset:
-  a2g_args:
-    r_energy: true
-    r_forces: true
-  src: val.db
-
-2024-05-17 16:41:35 (INFO): Loading dataset: ase_db
-2024-05-17 16:41:35 (INFO): rank: 0: Sampler created...
-2024-05-17 16:41:35 (INFO): Batch balancing is disabled for single GPU training.
-2024-05-17 16:41:35 (INFO): rank: 0: Sampler created...
-2024-05-17 16:41:35 (INFO): Batch balancing is disabled for single GPU training.
-2024-05-17 16:41:36 (INFO): rank: 0: Sampler created...
-2024-05-17 16:41:36 (INFO): Batch balancing is disabled for single GPU training.
-2024-05-17 16:41:36 (INFO): Loading model: gemnet_oc
-2024-05-17 16:41:36 (WARNING): Unrecognized arguments: ['symmetric_edge_symmetrization']
-2024-05-17 16:41:38 (INFO): Loaded GemNetOC with 38864438 parameters.
-2024-05-17 16:41:38 (WARNING): Model gradient logging to tensorboard not yet supported.
-2024-05-17 16:41:38 (WARNING): Using `weight_decay` from `optim` instead of `optim.optimizer_params`.Please update your config to use `optim.optimizer_params.weight_decay`.`optim.weight_decay` will soon be deprecated.
-2024-05-17 16:41:38 (INFO): Loading checkpoint from: /tmp/ocp_checkpoints/gnoc_oc22_oc20_all_s2ef.pt
-2024-05-17 16:41:38 (INFO): Overwriting scaling factors with those loaded from checkpoint. If you're generating predictions with a pretrained checkpoint, this is the correct behavior. To disable this, delete `scale_dict` from the checkpoint.
-/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
-  storage = elem.storage()._new_shared(numel)
-/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/torch_geometric/data/collate.py:145: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
-  storage = elem.storage()._new_shared(numel)
-2024-05-17 16:42:01 (INFO): energy_forces_within_threshold: 0.00e+00, energy_mae: 6.77e+00, forcesx_mae: 4.11e-02, forcesy_mae: 3.63e-02, forcesz_mae: 5.26e-02, forces_mae: 4.33e-02, forces_cosine_similarity: 8.24e-02, forces_magnitude_error: 7.42e-02, loss: 6.86e+00, lr: 5.00e-04, epoch: 1.69e-01, step: 1.00e+01
-2024-05-17 16:42:02 (INFO): Evaluating on val.
- device 0: 0%| | 0/2 [00:005,xc=PBE
-  src: data.db
-eval_metrics:
-  metrics:
-    energy:
-    - mae
-    forces:
-    - forcesx_mae
-    - forcesy_mae
-    - forcesz_mae
-    - mae
-    - cosine_similarity
-    - magnitude_error
-    misc:
-    - energy_forces_within_threshold
-  primary_metric: forces_mae
-gpus: 0
-logger: tensorboard
-loss_fns:
-- energy:
-    coefficient: 1
-    fn: mae
-- forces:
-    coefficient: 1
-    fn: l2mae
-model: gemnet_t
-model_attributes:
-  activation: silu
-  cbf:
-    name: spherical_harmonics
-  cutoff: 6.0
-  direct_forces: true
-  emb_size_atom: 512
-  emb_size_bil_trip: 64
-  emb_size_cbf: 16
-  emb_size_edge: 512
-  emb_size_rbf: 16
-  emb_size_trip: 64
-  envelope:
-    exponent: 5
-    name: polynomial
-  extensive: true
-  max_neighbors: 50
-  num_after_skip: 2
-  num_atom: 3
-  num_before_skip: 1
-  num_blocks: 3
-  num_concat: 1
-  num_radial: 128
-  num_spherical: 7
-  otf_graph: true
-  output_init: HeOrthogonal
-  rbf:
-    name: gaussian
-  regress_forces: true
-noddp: false
-optim:
-  batch_size: 16
-  clip_grad_norm: 10
-  ema_decay: 0.999
-  energy_coefficient: 1
-  eval_batch_size: 16
-  eval_every: 5000
-  force_coefficient: 1
-  loss_energy: mae
-  loss_force: atomwisel2
-  lr_gamma: 0.8
-  lr_initial: 0.0005
-  lr_milestones:
-  - 64000
-  - 96000
-  - 128000
-  - 160000
-  - 192000
-  max_epochs: 80
-  num_workers: 2
-  optimizer: AdamW
-  optimizer_params:
-    amsgrad: true
-  warmup_steps: -1
-outputs:
-  energy:
-    level: system
-  forces:
-    eval_on_free_atoms: true
-    level: atom
-    train_on_free_atoms: false
-slurm: {}
-task:
-  dataset: ase_db
-  prediction_dtype: float32
-test_dataset:
-  a2g_args:
-    r_energy: false
-    r_forces: false
-  select_args:
-    selection: natoms>5,xc=PBE
-  src: data.db
-trainer: ocp
-val_dataset: null
-
-2024-05-17 16:46:55 (INFO): Loading dataset: ase_db
-2024-05-17 16:46:56 (INFO): rank: 0: Sampler created...
-2024-05-17 16:46:56 (INFO): Batch balancing is disabled for single GPU training.
-2024-05-17 16:46:56 (INFO): rank: 0: Sampler created...
-2024-05-17 16:46:56 (INFO): Batch balancing is disabled for single GPU training.
-2024-05-17 16:46:56 (INFO): Loading model: gemnet_t
-2024-05-17 16:46:57 (INFO): Loaded GemNetT with 31671825 parameters.
-2024-05-17 16:46:57 (WARNING): Model gradient logging to tensorboard not yet supported.
-2024-05-17 16:46:58 (INFO): Loading checkpoint from: /tmp/ocp_checkpoints/gndt_oc22_all_s2ef.pt
-2024-05-17 16:46:58 (INFO): Overwriting scaling factors with those loaded from checkpoint. If you're generating predictions with a pretrained checkpoint, this is the correct behavior. To disable this, delete `scale_dict` from the checkpoint.
-2024-05-17 16:46:58 (WARNING): Scale factor comment not found in model
-2024-05-17 16:46:58 (INFO): Predicting on test.
- device 0: 0%| | 0/3 [00:00
fairchem by FAIR Chemistry
+
+
fairchem by FAIR Chemistry
+#### FAIR-Chem overview
+
 `fairchem` is the [FAIR](https://ai.meta.com/research/) Chemistry's centralized repository of all its data, models, demos, and application efforts for materials science and quantum chemistry.
 
 Collaborative projects that contribute or use the models and approaches in this repo:
@@ -22,7 +25,7 @@ We re-organized and rebranded the repository in 2024 (previously the `fairchem`
 general usability of these models beyond catalysis, including things like direct air capture.
 ```
 
-### Datasets in `fairchem`:
+#### Datasets in `fairchem`:
 
 `fairchem` provides training and evaluation code for tasks and models that take arbitrary
 chemical structures as input to predict energies / forces / positions / stresses,
 and can be used as a base scaffold for research projects. For an overview of
@@ -33,7 +36,7 @@ tasks, data, and metrics, please read the documentations and respective papers:
 - [OC20Dense](core/datasets/oc20dense)
 - [OC20NEB](core/datasets/oc20neb)
 
-### Projects and models built on `fairchem`:
+#### Projects and models built on `fairchem`:
 
 - SchNet [[`arXiv`](https://arxiv.org/abs/1706.08566)] [[`code`](https://github.com/FAIR-Chem/fairchem/blob/main/src/fairchem/core/models/schnet.py)]
 - DimeNet++ [[`arXiv`](https://arxiv.org/abs/2011.14115)] [[`code`](https://github.com/FAIR-Chem/fairchem/blob/main/src/fairchem/core/models/dimenet_plus_plus.py)]
@@ -53,7 +56,7 @@ Older model implementations that are no longer supported:
 - SpinConv [[`arXiv`](https://arxiv.org/abs/2106.09575)] [[`code`](https://github.com/FAIR-Chem/fairchem/blob/e7a8745eb307e8a681a1aa9d30c36e8c41e9457e/ocpmodels/models/spinconv.py)]
 - ForceNet [[`arXiv`](https://arxiv.org/abs/2103.01436)] [[`code`](https://github.com/FAIR-Chem/fairchem/blob/e7a8745eb307e8a681a1aa9d30c36e8c41e9457e/ocpmodels/models/forcenet.py)]
 
-## Discussion
+### Discussion
 
 For all non-codebase related questions and to keep up-to-date with the latest OCP
 announcements, please join the [discussion board](https://discuss.opencatalystproject.org/).
@@ -61,7 +64,7 @@ announcements, please join the [discussion board](https://discuss.opencatalystpr
 
 All code-related questions and issues should be posted directly on our
 [issues page](https://github.com/FAIR-Chem/fairchem/issues).
 
-## Acknowledgements
+### Acknowledgements
 
 - This codebase was initially forked from [CGCNN](https://github.com/txie-93/cgcnn)
 by [Tian Xie](http://txie.me), but has undergone significant changes since.
@@ -70,11 +73,11 @@ by [Tian Xie](http://txie.me), but has undergone significant changes since.
 - It was then developed as the OCP repo, and includes many contributions from the community and collaborators.
 - Much of the documentation was developed for various papers or as part of a comprehensive tutorial for the 2023 ACS Fall Chemistry conference.
 
-## License
+### License
 
 `fairchem` is released under the [MIT](https://github.com/FAIR-Chem/fairchem/blob/main/LICENSE.md) license.
-## Citing `fairchem`
+### Citing `fairchem`
 
 If you use this codebase in your work, please consider citing:
 
diff --git a/_static/logo.png b/_static/logo.png
deleted file mode 100644
index ffb62c1197..0000000000
Binary files a/_static/logo.png and /dev/null differ
diff --git a/autoapi/adsorbml/2023_neurips_challenge/challenge_eval/index.html b/autoapi/adsorbml/2023_neurips_challenge/challenge_eval/index.html
index 1abbaf8cf8..14446143cb 100644
--- a/autoapi/adsorbml/2023_neurips_challenge/challenge_eval/index.html
+++ b/autoapi/adsorbml/2023_neurips_challenge/challenge_eval/index.html
@@ -8,7 +8,7 @@
- - adsorbml.2023_neurips_challenge.challenge_eval — FAIR Chemistry Documentation +
@@ -146,14 +146,8 @@
- - - - - - FAIR Chemistry Documentation - Home - +