diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 329f8a9..ef2ee4f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,11 +1,19 @@ default_language_version: python: python3.10 repos: +- repo: local + hooks: + - id: generate-sphinx-docs + name: Generate Sphinx docs + entry: poetry run sphinx-build -b html docs/source docs/build + language: system - repo: https://github.com/pre-commit/pre-commit-hooks rev: v3.2.0 hooks: - id: trailing-whitespace + exclude: ^docs/source/_autosummary/.* - id: end-of-file-fixer + exclude: ^docs/source/_autosummary/.* - id: check-yaml - id: check-toml - id: check-added-large-files @@ -14,6 +22,7 @@ repos: - id: check-merge-conflict - id: mixed-line-ending args: ['--fix=lf'] + exclude: ^docs/source/_autosummary/.* - repo: https://github.com/psf/black rev: 23.1.0 hooks: diff --git a/README.md b/README.md index 61307db..f65cf20 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,10 @@ # torchcache -[![Lint and Test](https://github.com/meakbiyik/torchcache/actions/workflows/ci.yaml/badge.svg?branch=main)](https://github.com/meakbiyik/torchcache/actions/workflows/ci.yaml) [![codecov](https://codecov.io/gh/meakbiyik/torchcache/graph/badge.svg?token=Oh6mNp0pc8)](https://codecov.io/gh/meakbiyik/torchcache) +[![Lint and Test](https://github.com/meakbiyik/torchcache/actions/workflows/ci.yaml/badge.svg?branch=main)](https://github.com/meakbiyik/torchcache/actions/workflows/ci.yaml) [![Codecov](https://codecov.io/gh/meakbiyik/torchcache/graph/badge.svg?token=Oh6mNp0pc8)](https://codecov.io/gh/meakbiyik/torchcache) [![Documentation Status](https://readthedocs.org/projects/torchcache/badge/?version=latest)](https://torchcache.readthedocs.io/en/latest/?badge=latest) Effortlessly cache PyTorch module outputs on-the-fly with `torchcache`. -The documentation will be available soon at [torchcache.readthedocs.io](https://torchcache.readthedocs.io/en/latest/). 
+The documentation is available at [torchcache.readthedocs.io](https://torchcache.readthedocs.io/en/latest/). - [Features](#features) - [Installation](#installation) @@ -103,7 +103,7 @@ torchcache automatically manages the cache by hashing both: 1. The decorated module (including its source code obtained through `inspect.getsource`) and its args/kwargs. 2. The inputs provided to the module's forward method. -This hash serves as the cache key for the forward method's output per item in a batch. When our MRU (most-recently-used) cache fills up for the given session, the system continues running the forward method and dismisses the oldest output. This MRU strategy streamlines cache invalidation, aligning with the iterative nature of neural network training, without requiring any auxiliary record-keeping. +This hash serves as the cache key for the forward method's output per item in a batch. When our MRU (most-recently-used) cache fills up for the given session, the system continues running the forward method and dismisses the newest output. This MRU strategy streamlines cache invalidation, aligning with the iterative nature of neural network training, without requiring any auxiliary record-keeping. > :warning: **Warning**: To avoid having to calculate the directory size on every forward pass, `torchcache` measures and limits the size of the persistent data created only for the given session. To prevent the persistent cache from growing indefinitely, you should periodically clear the cache directory. Note that if you let `torchcache` create a temporary directory, it will be automatically deleted when the session ends. 
diff --git a/docs/source/_autosummary/torchcache.set_logger_config.rst b/docs/source/_autosummary/torchcache.set_logger_config.rst index cd9d8ba..9793df9 100644 --- a/docs/source/_autosummary/torchcache.set_logger_config.rst +++ b/docs/source/_autosummary/torchcache.set_logger_config.rst @@ -1,6 +1,6 @@ -torchcache.set\_logger\_config -============================== - -.. currentmodule:: torchcache - -.. autofunction:: set_logger_config +torchcache.set\_logger\_config +============================== + +.. currentmodule:: torchcache + +.. autofunction:: set_logger_config \ No newline at end of file diff --git a/docs/source/_autosummary/torchcache.torchcache._TorchCache.rst b/docs/source/_autosummary/torchcache.torchcache._TorchCache.rst index 4b8990d..37c7c51 100644 --- a/docs/source/_autosummary/torchcache.torchcache._TorchCache.rst +++ b/docs/source/_autosummary/torchcache.torchcache._TorchCache.rst @@ -1,21 +1,27 @@ -torchcache.torchcache.\_TorchCache -================================== - -.. currentmodule:: torchcache.torchcache - -.. autoclass:: _TorchCache - - - .. automethod:: __init__ - - - .. rubric:: Methods - - .. autosummary:: - - ~_TorchCache.__init__ - ~_TorchCache.cache_cleanup - ~_TorchCache.forward_hook - ~_TorchCache.forward_pre_hook - ~_TorchCache.hash_tensor - ~_TorchCache.wrap_module +torchcache.torchcache.\_TorchCache +================================== + +.. currentmodule:: torchcache.torchcache + +.. autoclass:: _TorchCache + + + .. automethod:: __init__ + + + .. rubric:: Methods + + .. 
autosummary:: + + ~_TorchCache.__init__ + ~_TorchCache.cache_cleanup + ~_TorchCache.forward_hook + ~_TorchCache.forward_pre_hook + ~_TorchCache.hash_tensor + ~_TorchCache.wrap_module + + + + + + \ No newline at end of file diff --git a/docs/source/_autosummary/torchcache.torchcache.rst b/docs/source/_autosummary/torchcache.torchcache.rst index 939a48c..8520cd2 100644 --- a/docs/source/_autosummary/torchcache.torchcache.rst +++ b/docs/source/_autosummary/torchcache.torchcache.rst @@ -1,6 +1,6 @@ -torchcache.torchcache -===================== - -.. currentmodule:: torchcache - -.. autofunction:: torchcache +torchcache.torchcache +===================== + +.. currentmodule:: torchcache + +.. autofunction:: torchcache \ No newline at end of file diff --git a/docs/source/index.rst b/docs/source/index.rst index e187379..7b11961 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -1,7 +1,7 @@ .. torchcache documentation master file -Welcome to torchcache's documentation! -====================================== +Welcome to torchcache! +====================== `torchcache` offers an effortless way to cache PyTorch module outputs on-the-fly. By caching the outputs of a module, you can save time and resources when running the same pre-trained model on the same inputs multiple times.