Reorder model tests to own files. Issue #222 #259

Merged · 19 commits · Jul 31, 2020
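
The new test modules in this diff import shared helpers (get_default and generate_train_data) from tests/test_modelgen.py, which is not shown here. A minimal sketch of what those helpers are assumed to provide, with hypothetical shapes and default keys rather than the actual mcfly implementation:

# Hypothetical sketch of the shared helpers imported by the new test files.
# Shapes and default keys below are assumptions for illustration only; the
# real implementations live in tests/test_modelgen.py.
import numpy as np


def generate_train_data(x_shape, nr_classes, nr_samples=20):
    """Return a small random (X_train, y_train) pair for a quick smoke fit."""
    X_train = np.random.rand(nr_samples, *x_shape[1:])  # (samples, timesteps, channels)
    labels = np.random.randint(0, nr_classes, nr_samples)
    y_train = np.eye(nr_classes)[labels]  # one-hot encoded targets
    return X_train, y_train


def get_default():
    """Return a dict of default hyperparameter-range settings that tests override."""
    return {
        'cnn_min_layers': 1, 'cnn_max_layers': 10,
        'cnn_min_fc_nodes': 10, 'cnn_max_fc_nodes': 2000,
        'deepconvlstm_min_conv_layers': 1, 'deepconvlstm_max_conv_layers': 10,
        'IT_min_network_depth': 3, 'IT_max_network_depth': 6,
        'IT_min_max_kernel_size': 10, 'IT_max_max_kernel_size': 100,
        'IT_min_filters_number': 32, 'IT_max_filters_number': 96,
    }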
103 changes: 103 additions & 0 deletions tests/test_cnn.py
@@ -0,0 +1,103 @@
# -*- coding: utf-8 -*-
import unittest
from mcfly.models import CNN
from test_modelgen import get_default, generate_train_data


class CNNSuite(unittest.TestCase):
"""
Test cases for CNN models.
"""

def test_cnn_starts_with_batchnorm(self):
""" CNN models should always start with a batch normalization layer. """
model_type = CNN((None, 20, 3), 2)
model = model_type.create_model(**{"filters": [32, 32],
"fc_hidden_nodes": 100})
assert 'BatchNormalization' in str(type(model.layers[0])), 'Wrong layer type.'


def test_cnn_fc_nodes(self):
""" CNN model should have number of dense nodes defined by user. """
fc_hidden_nodes = 101
model_type = CNN((None, 20, 3), 2)
model = model_type.create_model(**{"filters": [32, 32],
"fc_hidden_nodes": fc_hidden_nodes})

dense_layer = [l for l in model.layers if 'Dense' in str(l)][0]
assert dense_layer.output_shape[1] == fc_hidden_nodes, 'Wrong number of fc nodes.'


def test_cnn_batchnorm_dim(self):
""""The output shape of the batchnorm should be (None, nr_timesteps, nr_filters)"""
model_type = CNN((None, 20, 3), 2)
model = model_type.create_model(**{"filters": [32, 32],
"fc_hidden_nodes": 100})

batchnormlay = model.layers[2]
assert batchnormlay.output_shape == (None, 20, 32)


def test_cnn_enough_batchnorm(self):
"""CNN model should contain as many batch norm layers as it has activations layers"""
model_type = CNN((None, 20, 3), 2)
model = model_type.create_model(**{"filters": [32, 32],
"fc_hidden_nodes": 100})

batch_norm_layers = len([l for l in model.layers if 'BatchNormalization' in str(l)])
activation_layers = len([l for l in model.layers if 'Activation' in str(l)])
assert batch_norm_layers == activation_layers


def test_cnn_metrics(self):
"""CNN model should be compiled with the metrics that we give it"""
metrics = ['accuracy', 'mae']
x_shape = (None, 20, 3)
nr_classes = 2
X_train, y_train = generate_train_data(x_shape, nr_classes)

model_type = CNN(x_shape, nr_classes, metrics=metrics)
model = model_type.create_model(**{"filters": [32, 32],
"fc_hidden_nodes": 100})
model.fit(X_train, y_train, epochs=1)

model_metrics = [m.name for m in model.metrics]
for metric in metrics:
assert metric in model_metrics


def test_CNN_hyperparameters_nrlayers(self):
""" Number of Conv layers from range [4, 4] should be 4. """
custom_settings = get_default()
kwargs = {'cnn_min_layers': 4,
'cnn_max_layers': 4}
# Replace default parameters with input
for key, value in kwargs.items():
if key in custom_settings:
custom_settings[key] = value

model_type = CNN(None, None, **custom_settings)
hyperparams = model_type.generate_hyperparameters()

assert len(hyperparams.get('filters')) == 4


def test_CNN_hyperparameters_fcnodes(self):
""" Number of fc nodes from range [123, 123] should be 123. """
custom_settings = get_default()
kwargs = {'cnn_min_fc_nodes': 123,
'cnn_max_fc_nodes': 123}
# Replace default parameters with input
for key, value in kwargs.items():
if key in custom_settings:
custom_settings[key] = value

model_type = CNN(None, None, **custom_settings)
hyperparams = model_type.generate_hyperparameters()

assert hyperparams.get('fc_hidden_nodes') == 123



if __name__ == '__main__':
unittest.main()
55 changes: 55 additions & 0 deletions tests/test_conv_lstm.py
@@ -0,0 +1,55 @@
# -*- coding: utf-8 -*-
import unittest
from mcfly.models import ConvLSTM
from test_modelgen import get_default

class ConvLSTMSuite(unittest.TestCase):
"""
    Test cases for DeepConvLSTM models.
"""

def test_deepconvlstm_batchnorm_dim(self):
"""The output shape of the batchnorm should be (None, nr_timesteps, nr_channels, nr_filters)"""
model_type = ConvLSTM((None, 20, 3), 2)
model = model_type.create_model(**{"filters": [32, 32],
"lstm_dims": [32, 32]})

batchnormlay = model.layers[3]
assert batchnormlay.output_shape == (None, 20, 3, 32)

def test_deepconvlstm_enough_batchnorm(self):
"""LSTM model should contain as many batch norm layers as it has activations layers"""
model_type = ConvLSTM((None, 20, 3), 2)
model = model_type.create_model(**{"filters": [32, 32, 32],
"lstm_dims": [32, 32, 32]})

batch_norm_layers = len([l for l in model.layers if 'BatchNormalization' in str(l)])
activation_layers = len([l for l in model.layers if 'Activation' in str(l)])
assert batch_norm_layers == activation_layers

def test_DeepConvLSTM_hyperparameters_nrconvlayers(self):
""" Number of Conv layers from range [4, 4] should be 4. """
custom_settings = get_default()
kwargs = {'deepconvlstm_min_conv_layers': 4,
'deepconvlstm_max_conv_layers': 4}
# Replace default parameters with input
for key, value in kwargs.items():
if key in custom_settings:
custom_settings[key] = value

model_type = ConvLSTM(None, None, **custom_settings)
hyperparams = model_type.generate_hyperparameters()

assert len(hyperparams.get('filters')) == 4

def test_deepconvlstm_starts_with_batchnorm(self):
""" DeepConvLSTM models should always start with a batch normalization layer. """
model_type = ConvLSTM((None, 20, 3), 2)
model = model_type.create_model(**{"filters": [32, 32],
"lstm_dims": [32, 32]})

assert 'BatchNormalization' in str(type(model.layers[0])), 'Wrong layer type.'


if __name__ == '__main__':
unittest.main()
97 changes: 97 additions & 0 deletions tests/test_inception_time.py
@@ -0,0 +1,97 @@
# -*- coding: utf-8 -*-

import unittest
from mcfly.models import InceptionTime
from test_modelgen import get_default, generate_train_data

class InceptionTimeSuite(unittest.TestCase):
"""
Test cases for InceptionTime models.
"""

# Tests for InceptionTime model:
def test_InceptionTime_starts_with_batchnorm(self):
""" InceptionTime models should always start with a batch normalization layer. """
model_type = InceptionTime((None, 20, 3), 2)
model = model_type.create_model(16)

assert 'BatchNormalization' in str(type(model.layers[1])), 'Wrong layer type.'


def test_InceptionTime_first_inception_module(self):
""" Test layers of first inception module. """
model_type = InceptionTime((None, 20, 3), 2)
model = model_type.create_model(16)

        assert ('Conv1D' in str(type(model.layers[2]))
                or 'Convolution1D' in str(type(model.layers[2]))), 'Wrong layer type.'
assert 'MaxPooling1D' in str(type(model.layers[3])), 'Wrong layer type.'
assert 'Concatenate' in str(type(model.layers[8])), 'Wrong layer type.'


def test_InceptionTime_depth(self):
""" InceptionTime model should have depth (number of residual modules) as defined by user. """
depths = 3

model_type = InceptionTime((None, 20, 3), 2)
model = model_type.create_model(16, network_depth=depths)

concat_layers = [str(type(l)) for l in model.layers if 'concatenate' in str(type(l)).lower()]
        assert len(concat_layers) == depths, 'Wrong number of inception modules (network depth).'


def test_InceptionTime_first_module_dim(self):
""""The output shape throughout the first residual module should be (None, nr_timesteps, min_filters_number)"""
min_filters_number = 16

model_type = InceptionTime((None, 30, 5), 2)
model = model_type.create_model(min_filters_number)

secondConvlayer = model.layers[5]
firstConcatlayer = model.layers[8]
assert secondConvlayer.output_shape == (None, 30, min_filters_number)
assert firstConcatlayer.output_shape == (None, 30, min_filters_number * 4)


def test_InceptionTime_metrics(self):
"""InceptionTime model should be compiled with the metrics that we give it"""
metrics = ['accuracy', 'mae']
x_shape = (None, 20, 3)
nr_classes = 2
X_train, y_train = generate_train_data(x_shape, nr_classes)

model_type = InceptionTime(x_shape, nr_classes, metrics=metrics)
model = model_type.create_model(16)
model.fit(X_train, y_train)

model_metrics = [m.name for m in model.metrics]
for metric in metrics:
assert metric in model_metrics


def test_InceptionTime_hyperparameters(self):
""" Network depth from range [5,5] should be 5.
Maximum kernal size from range [12, 12] should be 12.
Minimum filter number from range [32, 32] should be 32. """
custom_settings = get_default()
x_shape = (None, 20, 3)
kwargs = {'IT_min_network_depth': 5,
'IT_max_network_depth': 5,
'IT_min_max_kernel_size': 10,
'IT_max_max_kernel_size': 10,
'IT_min_filters_number': 32,
'IT_max_filters_number': 32}
# Replace default parameters with input
for key, value in kwargs.items():
if key in custom_settings:
custom_settings[key] = value

model_type = InceptionTime(x_shape, None, **custom_settings)
hyperparams = model_type.generate_hyperparameters()

assert hyperparams.get('network_depth') == 5, 'Wrong network depth'
assert hyperparams.get('max_kernel_size') == 10, 'Wrong kernel'
assert hyperparams.get('filters_number') == 32, 'Wrong filter number'


if __name__ == '__main__':
unittest.main()
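
With the model tests split into their own files, each suite can be run on its own via its unittest.main() guard, or all of them can be picked up together with unittest discovery. A quick sketch, assuming the working directory is the repository root and the new files live under tests/:

# Hypothetical runner: discover and execute the new per-model test modules
# (test_cnn.py, test_conv_lstm.py, test_inception_time.py) in one pass.
import unittest

suite = unittest.defaultTestLoader.discover("tests", pattern="test_*.py")
unittest.TextTestRunner(verbosity=2).run(suite)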