
Commit e795992

v0.4.11 (#7)
* v0.4.11

---------

Co-authored-by: Roy Sadaka <[email protected]>
1 parent a36392f commit e795992

18 files changed: +285 −189 lines changed

CHANGELOG.txt

+6

@@ -1,6 +1,12 @@
 Change Log
 ==========
 
+0.4.11 (08/02/2023)
+-----------------
+* Dense custom layer to support apply norm (configurable to before or after activation)
+* StatsPrint callback to support printing best confusion matrix when at least one of the metrics is of type ``MetricConfusionMatrixBase``
+* Some minor cosmetic changes
+
 
 0.4.10 (04/02/2023)
 -----------------
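The first changelog item is about where normalization sits relative to the activation. A minimal sketch of the idea in plain PyTorch; the class and parameter names below are illustrative assumptions, not lpd's actual ``Dense`` signature:

```python
import torch
import torch.nn as nn

class DenseWithNorm(nn.Module):
    # Illustrative sketch of a dense layer with configurable norm placement;
    # lpd.extensions.custom_layers.Dense's real API may differ.
    def __init__(self, in_dim, out_dim, apply_norm_before_activation=True):
        super().__init__()
        self.linear = nn.Linear(in_dim, out_dim)
        self.norm = nn.LayerNorm(out_dim)
        self.activation = nn.ReLU()
        self.norm_first = apply_norm_before_activation

    def forward(self, x):
        x = self.linear(x)
        if self.norm_first:
            return self.activation(self.norm(x))  # norm, then activation
        return self.norm(self.activation(x))      # activation, then norm

layer = DenseWithNorm(128, 64, apply_norm_before_activation=False)
y = layer(torch.randn(32, 128))  # here the norm is applied after ReLU
```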

README.md

+8 −9

@@ -25,24 +25,23 @@ There are 2 types of ``lpd`` packages available
 pip install lpd-nodeps
 ```
 
-<b>[v0.4.10-beta](https://github.com/RoySadaka/lpd/releases) Release - contains the following:</b>
+<b>[v0.4.11-beta](https://github.com/RoySadaka/lpd/releases) Release - contains the following:</b>
+
+* ``Dense`` custom layer to support apply norm (configurable to before or after activation)
+* ``StatsPrint`` callback to support printing best confusion matrix when at least one of the metrics is of type ``MetricConfusionMatrixBase``
+* Some minor cosmetic changes
 
-* ``TransformerEncoderStack`` to support activation as input
-* ``PositionalEncoding`` to support more than 3 dimensions input
 
 
 Previously on lpd:
+* ``TransformerEncoderStack`` to support activation as input
+* ``PositionalEncoding`` to support more than 3 dimensions input
 * Updated Pipfile
 * Fixed confusion matrix cpu/gpu device error
 * Better handling on callbacks where apply_on_states=None (apply on all states)
 * Bug fix in case validation samples are empty
 * Bug fix in verbosity level 2 in train
 * Verbosity change in torch_utils
-* Fix to PositionalEncoding to be batch first
-* Minor change to MatMul2D, use torch.matmul instead of torch.bmm
-* Bug fix when saving full trainer that has tensorboard callback
-* Added LossOptimizerHandlerAccumulateSamples
-* Added LossOptimizerHandlerAccumulateBatches
 
 
 ## Usage

@@ -271,7 +270,7 @@ only after the defined num of batches (or samples) were accumulated
 ```
 Output example:
 
-![EpochSummary](https://raw.githubusercontent.com/RoySadaka/ReposMedia/main/lpd/images/epoch_summary.png)
+![EpochSummary](https://raw.githubusercontent.com/RoySadaka/ReposMedia/main/lpd/images/epoch_summary_0_4_11.png)
 
 
 
examples/confusion_matrix/train.py

+1 −1

@@ -50,7 +50,7 @@ def get_trainer(N, D_in, H, D_out, num_epochs, num_classes, data_loader, data_lo
 
     callbacks = [
                     LossOptimizerHandler(),
-                    StatsPrint(print_confusion_matrix=True)
+                    StatsPrint(print_confusion_matrix_normalized=True)
                 ]
 
     trainer = Trainer(model=model,
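Note the rename: ``print_confusion_matrix`` becomes ``print_confusion_matrix_normalized``, which prints the normalized matrix. A minimal sketch of the surrounding setup, assuming ``TruePositives`` derives from ``MetricConfusionMatrixBase`` (which, per the changelog, is what enables the confusion-matrix printout):

```python
from lpd.callbacks import LossOptimizerHandler, StatsPrint
from lpd.metrics import TruePositives

# At least one metric of type MetricConfusionMatrixBase enables the printout;
# TruePositives is assumed here to be such a metric.
metrics = [TruePositives(num_classes=2, threshold=0, name='TP')]

callbacks = [
    LossOptimizerHandler(),
    StatsPrint(print_confusion_matrix_normalized=True),
]
```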

examples/multiple_inputs/model.py

+12 −6

@@ -6,7 +6,7 @@
 from lpd.extensions.custom_layers import TransformerEncoderStack, Attention, MatMul2D
 from lpd.enums import Phase, State, MonitorType, MonitorMode, StatsType
 from lpd.callbacks import StatsPrint, ModelCheckPoint, Tensorboard, EarlyStopping, SchedulerStep, LossOptimizerHandler, CallbackMonitor
-from lpd.metrics import BinaryAccuracyWithLogits, TruePositives
+from lpd.metrics import BinaryAccuracyWithLogits, TruePositives, FalsePositives
 from lpd.extensions.custom_schedulers import DoNothingToLR
 import lpd.utils.torch_utils as tu
 

@@ -82,7 +82,8 @@ def get_trainer(config,
 
     metrics = [
                 BinaryAccuracyWithLogits(name='Accuracy'),
-                TruePositives(num_classes=2, threshold=0, name='TP')
+                TruePositives(num_classes=2, threshold=0, name='TP'),
+                FalsePositives(num_classes=2, threshold=0, name='FP')
               ]
 
     callbacks = [

@@ -94,10 +95,15 @@ def get_trainer(config,
                     EarlyStopping(apply_on_phase=Phase.EPOCH_END,
                                   apply_on_states=State.EXTERNAL,
                                   callback_monitor=CallbackMonitor(monitor_type=MonitorType.LOSS,
-                                                                   stats_type=StatsType.VAL,
-                                                                   monitor_mode=MonitorMode.MIN,
-                                                                   patience=config.EARLY_STOPPING_PATIENCE)),
-                    StatsPrint(apply_on_phase=Phase.EPOCH_END, round_values_on_print_to=7, print_confusion_matrix_normalized=True),
+                                                                   stats_type=StatsType.VAL,
+                                                                   monitor_mode=MonitorMode.MIN,
+                                                                   patience=config.EARLY_STOPPING_PATIENCE)),
+                    StatsPrint(apply_on_phase=Phase.EPOCH_END,
+                               round_values_on_print_to=7,
+                               print_confusion_matrix_normalized=True,
+                               train_best_confusion_matrix_monitor=CallbackMonitor(monitor_type=MonitorType.LOSS,
+                                                                                   stats_type=StatsType.TRAIN,
+                                                                                   monitor_mode=MonitorMode.MIN)),
                     ModelCheckPoint(checkpoint_dir=checkpoint_dir,
                                     checkpoint_file_name=checkpoint_file_name,
                                     callback_monitor=CallbackMonitor(monitor_type=MonitorType.LOSS,
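The new ``train_best_confusion_matrix_monitor`` argument, shown in the hunk above, gives ``StatsPrint`` a monitor that decides which epoch's train confusion matrix counts as "best" for printing; here, the epoch with minimal train loss. Pulled out of the diff into a standalone snippet:

```python
from lpd.enums import Phase, MonitorType, MonitorMode, StatsType
from lpd.callbacks import StatsPrint, CallbackMonitor

# Print stats at the end of each epoch; the "best" train confusion matrix
# is the one from the epoch where train loss reached its minimum.
stats_print = StatsPrint(apply_on_phase=Phase.EPOCH_END,
                         round_values_on_print_to=7,
                         print_confusion_matrix_normalized=True,
                         train_best_confusion_matrix_monitor=CallbackMonitor(monitor_type=MonitorType.LOSS,
                                                                             stats_type=StatsType.TRAIN,
                                                                             monitor_mode=MonitorMode.MIN))
```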

lpd/callbacks/__init__.py

+3 −4

@@ -1,5 +1,6 @@
 from lpd.callbacks.callback_base import CallbackBase
-from lpd.callbacks.callback_monitor import CallbackMonitor, CallbackMonitorResult
+from lpd.callbacks.callback_monitor import CallbackMonitor
+from lpd.callbacks.callback_monitor_result import CallbackMonitorResult
 from lpd.callbacks.stats_print import StatsPrint
 from lpd.callbacks.model_checkpoint import ModelCheckPoint
 from lpd.callbacks.tensorboard import Tensorboard

@@ -11,6 +12,4 @@
 from lpd.callbacks.loss_optimizer_handler import LossOptimizerHandler
 from lpd.callbacks.loss_optimizer_handler_base import LossOptimizerHandlerBase
 from lpd.callbacks.loss_optimizer_handler_accumulate_batchs import LossOptimizerHandlerAccumulateBatches
-from lpd.callbacks.loss_optimizer_handler_accumulate_samples import LossOptimizerHandlerAccumulateSamples
-
-
+from lpd.callbacks.loss_optimizer_handler_accumulate_samples import LossOptimizerHandlerAccumulateSamples

lpd/callbacks/callback_monitor.py

+13 −43

@@ -1,3 +1,4 @@
+from lpd.callbacks.callback_monitor_result import CallbackMonitorResult
 from lpd.enums import Phase, State, MonitorType, MonitorMode, StatsType
 from lpd.callbacks.callback_context import CallbackContext
 from typing import Union, List, Optional, Dict

@@ -6,7 +7,6 @@
 
 from lpd.utils.threshold_checker import ThresholdChecker, AbsoluteThresholdChecker
 
-
 class CallbackMonitor:
     """
     Will check if the desired metric improved with support for patience

@@ -15,14 +15,14 @@ class CallbackMonitor:
                        (negative number will set to inf)
         monitor_type - e.g lpd.enums.MonitorType.LOSS
         stats_type - e.g lpd.enums.StatsType.VAL
-        monitor_mode - e.g. lpd.enums.MonitorMode.MIN, min wothh check if the metric decreased, MAX will check for increase
+        monitor_mode - e.g. lpd.enums.MonitorMode.MIN, will check if the metric decreased, MonitorMode.MAX will check for increase
         metric_name - in case of monitor_mode=lpd.enums.MonitorMode.METRIC, provide metric_name, otherwise, leave it None
         threshold_checker - to check if the criteria was met, if None, AbsoluteThresholdChecker with threshold=0.0 will be used
     """
-    def __init__(self, monitor_type: MonitorType, 
-                       stats_type: StatsType, 
+    def __init__(self, monitor_type: MonitorType,
+                       stats_type: StatsType,
                        monitor_mode: MonitorMode,
-                       patience: int=None, 
+                       patience: int=None,
                        metric_name: Optional[str]=None,
                        threshold_checker: Optional[ThresholdChecker]=None):
         self.patience = inf if patience is None or patience < 0 else patience

@@ -32,9 +32,9 @@ def __init__(self, monitor_type: MonitorType,
         self.monitor_mode = monitor_mode
         self.threshold_checker = AbsoluteThresholdChecker(monitor_mode) if threshold_checker is None else threshold_checker
         self.metric_name = metric_name
-        self.minimum = torch.tensor(inf)
-        self.maximum = torch.tensor(-inf)
-        self.previous = self._get_best()
+        self.minimum = None
+        self.maximum = None
+        self.previous = None
         self.description = self._get_description()
         self._track_invoked = False
 

@@ -47,7 +47,7 @@ def _get_description(self):
     def _get_best(self):
         return self.minimum if self.monitor_mode == MonitorMode.MIN else self.maximum
 
-    def track(self, callback_context: CallbackContext):
+    def track(self, callback_context: CallbackContext) -> CallbackMonitorResult:
         c = callback_context #READABILITY DOWN THE ROAD
 
         # EXTRACT value_to_consider

@@ -70,8 +70,9 @@ def track(self, callback_context: CallbackContext):
             value_to_consider = metrics_to_consider[self.metric_name]
 
         if not self._track_invoked:
-            self.minimum = -torch.log(torch.zeros_like(value_to_consider)) # [[inf,inf,inf,inf]]
-            self.maximum = torch.log(torch.zeros_like(value_to_consider)) # [[-inf,-inf,-inf,-inf]]
+            self.minimum = -torch.log(torch.zeros_like(value_to_consider)) # [[inf,...,inf]]
+            self.maximum = torch.log(torch.zeros_like(value_to_consider)) # [[-inf,...,-inf]]
+            self.previous = self._get_best()
             self._track_invoked = True
 
 

@@ -80,13 +81,11 @@ def track(self, callback_context: CallbackContext):
         change_from_previous = value_to_consider - self.previous
         curr_best = self._get_best()
         change_from_best = value_to_consider - curr_best
-        curr_minimum = self.minimum
-        curr_maximum = self.maximum
         self.minimum = torch.min(self.minimum, value_to_consider)
         self.maximum = torch.max(self.maximum, value_to_consider)
         curr_previous = self.previous
         self.previous = value_to_consider
-        did_improve = False
+        did_improve = False # UNLESS SAID OTHERWISE
         new_best = self._get_best()
         name = self.metric_name if self.metric_name else 'loss'
 

@@ -109,32 +108,3 @@ def track(self, callback_context: CallbackContext):
                                      patience_left=self.patience_countdown,
                                      description=self.description,
                                      name = name)
-
-
-class CallbackMonitorResult():
-    def __init__(self, did_improve: bool,
-                       new_value: float,
-                       prev_value: float,
-                       new_best: float,
-                       prev_best: float,
-                       change_from_previous: float,
-                       change_from_best: float,
-                       patience_left: int,
-                       description: str,
-                       name: str):
-        self.name = name
-        self.did_improve = did_improve
-        self.new_value = new_value
-        self.prev_value = prev_value
-        self.new_best = new_best
-        self.prev_best = prev_best
-        self.change_from_previous = change_from_previous
-        self.change_from_best = change_from_best
-        self.patience_left = patience_left
-        self.description = description
-
-    def has_improved(self):
-        return self.did_improve
-
-    def has_patience(self):
-        return self.patience_left > 0
lpd/callbacks/callback_monitor_result.py (new file, per the import in lpd/callbacks/__init__.py)

+27
@@ -0,0 +1,27 @@
+class CallbackMonitorResult():
+    def __init__(self, did_improve: bool,
+                       new_value: float,
+                       prev_value: float,
+                       new_best: float,
+                       prev_best: float,
+                       change_from_previous: float,
+                       change_from_best: float,
+                       patience_left: int,
+                       description: str,
+                       name: str):
+        self.name = name
+        self.did_improve = did_improve
+        self.new_value = new_value
+        self.prev_value = prev_value
+        self.new_best = new_best
+        self.prev_best = prev_best
+        self.change_from_previous = change_from_previous
+        self.change_from_best = change_from_best
+        self.patience_left = patience_left
+        self.description = description
+
+    def has_improved(self):
+        return self.did_improve
+
+    def has_patience(self):
+        return self.patience_left > 0
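With the result class in its own module, ``CallbackMonitor.track`` (whose new return annotation appears in the diff above) can be consumed from any custom callback. A minimal sketch; the callback scaffolding around it is assumed:

```python
from lpd.enums import MonitorType, MonitorMode, StatsType
from lpd.callbacks import CallbackMonitor

monitor = CallbackMonitor(monitor_type=MonitorType.LOSS,
                          stats_type=StatsType.VAL,
                          monitor_mode=MonitorMode.MIN,
                          patience=5)

def on_epoch_end(callback_context):
    # track() returns a CallbackMonitorResult, per the new annotation
    result = monitor.track(callback_context)
    if result.has_improved():
        print(f'{result.name} improved: {result.prev_best} -> {result.new_best}')
    elif not result.has_patience():
        print('patience exhausted, time to stop')
```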

lpd/callbacks/scheduler_step.py

+2 −3

@@ -1,7 +1,6 @@
-from lpd.enums import Phase, State, MonitorType, MonitorMode, StatsType
+from lpd.enums import Phase, State
 from lpd.callbacks.callback_base import CallbackBase
 from lpd.callbacks.callback_context import CallbackContext
-from lpd.callbacks.callback_monitor import CallbackMonitorResult
 from lpd.utils.torch_utils import get_lrs_from_optimizer
 from typing import Union, List, Optional, Dict, Callable
 

@@ -12,7 +11,7 @@ class SchedulerStep(CallbackBase):
         apply_on_phase - see in CallbackBase
         apply_on_states - see in CallbackBase
         scheduler_parameters_func - Since some schedulers takes parameters in step(param1, param2...)
-                                    And other schedulers step() are parameterless, provide:
+                                    And other schedulers step() are parameter-less, provide:
                                     a function (or lambda) that except CallbackContext and returns whatever information needed,
                                     e.g. for scheduler that takes val_loss as parameter, initialize like this:
                                     SchedulerStep(scheduler_parameters_func=lambda callback_context: callback_context.val_stats.get_loss())
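For context, the docstring's own example wired into a callback instance; the ``apply_on_phase``/``apply_on_states`` values follow the multiple_inputs example above and are assumptions here:

```python
from lpd.enums import Phase, State
from lpd.callbacks import SchedulerStep

# For a scheduler whose step() takes a value (e.g. torch's ReduceLROnPlateau),
# hand SchedulerStep a function that extracts that value from the CallbackContext;
# a parameter-less scheduler (e.g. StepLR) would simply omit scheduler_parameters_func.
scheduler_step = SchedulerStep(apply_on_phase=Phase.EPOCH_END,
                               apply_on_states=State.EXTERNAL,
                               scheduler_parameters_func=lambda callback_context: callback_context.val_stats.get_loss())
```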
