Commit

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
pre-commit-ci[bot] committed Aug 3, 2024
1 parent f307e8d · commit b9c6dbd
Showing 5 changed files with 13 additions and 22 deletions.
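
Every hunk below follows the same pattern: the formatting hooks collapse a stub function or method whose body is only `...` onto a single line. A minimal sketch of that pattern (illustrative only; `f_before` and `f_after` are placeholder names, not code taken from the repository):

```python
import torch


# Before: the `...` stub body sits on its own indented line.
def f_before(x: torch.Tensor) -> torch.Tensor:
    ...


# After: an `...`-only body is folded onto the `def` line.
def f_after(x: torch.Tensor) -> torch.Tensor: ...
```

The stubs behave identically; only their formatting changes.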
3 changes: 1 addition & 2 deletions docs/user_guide/neuroevolution.md
@@ -293,8 +293,7 @@ class CustomNE(NEProblem):
def _network_constants(self):
    return {"d": self._d} # Pass self._d as 'd' to networks at instantiation

-def _evaluate_network(self, network: torch.nn.Module):
-    ...
+def _evaluate_network(self, network: torch.nn.Module): ...
```

which then allows, for example, the use of the variable `d` in the string representation of the network
3 changes: 1 addition & 2 deletions src/evotorch/algorithms/functional/__init__.py
@@ -256,8 +256,7 @@ def f(x: torch.Tensor) -> torch.Tensor:
class MyProblem(Problem):
-    def __init__(self):
-        ...
+    def __init__(self): ...
    def _evaluate_batch(self, batch: SolutionBatch):
        # Stateful batch evaluation code goes here
8 changes: 4 additions & 4 deletions src/evotorch/core.py
@@ -610,8 +610,9 @@ def __init__(self):
        # for initializing solutions
    )
-def _evaluate_batch(self, solutions: SolutionBatch):
-    ... # code to compute and fill the fitnesses goes here
+def _evaluate_batch(
+    self, solutions: SolutionBatch
+): ... # code to compute and fill the fitnesses goes here
def _fill(self, values: torch.Tensor):
    # `values` is an empty tensor of shape (n, m) where n is the number
@@ -749,8 +750,7 @@ def _evaluate_batch(self, solutions: SolutionBatch):
from evotorch.logging import StdOutLogger
-def f(x: torch.Tensor) -> torch.Tensor:
-    ...
+def f(x: torch.Tensor) -> torch.Tensor: ...
prob = Problem("min", f, solution_length=..., dtype=torch.float32)
18 changes: 6 additions & 12 deletions src/evotorch/decorators.py
@@ -254,8 +254,7 @@ def f(x: torch.Tensor) -> torch.Tensor:
@on_device("cuda")
-def f(x: torch.Tensor) -> torch.Tensor:
-    ...
+def f(x: torch.Tensor) -> torch.Tensor: ...
problem = Problem(
@@ -306,8 +305,7 @@ def _evaluate_batch(self, solutions: SolutionBatch):
```python
@on_device("cpu")
-def f(x: torch.Tensor) -> torch.Tensor:
-    ...
+def f(x: torch.Tensor) -> torch.Tensor: ...
print(f.device) # Prints: torch.device("cpu")
@@ -499,8 +497,7 @@ def f(x: torch.Tensor) -> torch.Tensor:
@on_aux_device
-def f(x: torch.Tensor) -> torch.Tensor:
-    ...
+def f(x: torch.Tensor) -> torch.Tensor: ...
problem = Problem(
@@ -632,8 +629,7 @@ def expects_ndim( # noqa: C901
@expects_ndim(2, 1)
-def f(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
-    ...
+def f(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor: ...
```
Once decorated like this, the function `f` will gain the following
@@ -663,8 +659,7 @@ def f(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
```python
@expects_ndim(2, 1, randomness="error")
-def f(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
-    ...
+def f(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor: ...
```
If `randomness` is set as "error", then, when there is batching, any
@@ -941,8 +936,7 @@ def fitness(decision_values: torch.Tensor) -> torch.Tensor:
```python
@rowwise(randomness="error")
-def f(x: torch.Tensor) -> torch.Tensor:
-    ...
+def f(x: torch.Tensor) -> torch.Tensor: ...
```
If `randomness` is set as "error", then, when there is batching, any
3 changes: 1 addition & 2 deletions src/evotorch/distributions.py
@@ -1395,8 +1395,7 @@ def make_functional_grad_estimator(
)
-def f(x: torch.Tensor) -> torch.Tensor:
-    ...
+def f(x: torch.Tensor) -> torch.Tensor: ...
fgrad = make_functional_grad_estimator(
