# (C) Copyright IBM Corp. 2019, 2020, 2021, 2022.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from unittest import TestCase

import numpy as np

from simulai.regression import DenseNetwork
from simulai.models import ModelMaker
from simulai.metrics import L2Norm

class TestDenseNetwork(TestCase):

    def setUp(self) -> None:
        self.errors = list()

    def test_identity_dense_network_default_named_outputs(self) -> None:

        # One-dimensional case

        # Constructing the dataset
        train_factor = 0.50

        N = 10
        Nt = 2000
        N_train = int(train_factor * Nt)

        x = np.linspace(0, 1, N)
        t = np.linspace(0, 1, Nt)
        i = np.linspace(1, 10, N)
        j = np.linspace(1, 10, Nt)

        T, X = np.meshgrid(t, x, indexing='ij')
        J, I = np.meshgrid(j, i, indexing='ij')

        Z = np.sin(J * np.pi * T) * np.cos(I * np.pi * X)
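        # Z has shape (Nt, N): each row is the spatial profile of the wave
        # field at one time instant. The split below is chronological: the
        # first half of the instants is used for training and the remainder
        # for testing.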
        train_data = Z[:N_train, :]
        test_data = Z[N_train:, :]
        architecture = [50, 50, 50]

        test_setup = {
            'architecture': architecture,
            'dropouts_rates_list': [0, 0, 0],
            'learning_rate': 1e-05,
            'l2_reg': 1e-05,
            'activation_function': 'elu',
            'loss_function': 'mse',
            'optimizer': 'adam',
            'input_dim': N,
            'output_dim': N
        }
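        # As the test name suggests, input_dim and output_dim are both N: the
        # network is trained to map each snapshot onto itself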
        neural_net = DenseNetwork(architecture=architecture,
                                  config=test_setup)

        optimizers_list = {"Adam": {"maxiter": 2000}}

        residuals_type = neural_net.output_dim * ['surrogate']
        losses = neural_net.output_dim * ['square-mean']
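        # One residual and one loss specification per network output (N
        # entries each), since each output variable is named individually in
        # this test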
        # Fitting a regression object requires wrapping it in a ModelMaker
        # instance
        model = ModelMaker(regressions=[neural_net],  # The regression objects to be used
                           optimizers_list=optimizers_list,  # The optimizers to be used
                           residuals_type=residuals_type,  # The role of each term in the loss function
                           losses=losses,  # The loss type of each term
                           data_residuals=neural_net.outputs_names,  # The residual names
                           regularizations=[{'l2_reg': 1e-5, 'l1_reg': 0}]  # The regularization penalties (optional)
                           )

        # The model receives lists of input and target arrays, which are then
        # distributed among the input and output variables of the regression
        # objects in the common environment
        model.fit(input_data_list=[train_data],
                  target_data_list=[train_data],
                  normalize_all=True,
                  shuffle=False)
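        # Evaluating the trained surrogate on the unseen time window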
        approximated_test_data = model.eval(input_cube=test_data)

        l2_norm = L2Norm()
        error = l2_norm(data=approximated_test_data,
                        reference_data=test_data,
                        relative_norm=True)

        print(f'The approximation error was {100 * error} %')

        self.errors.append(error)
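        # Minimal sanity check (an assumption added here, not part of the
        # original test): the relative error should at least be finite
        self.assertTrue(np.isfinite(error))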

    def test_identity_dense_network_without_names(self) -> None:

        # One-dimensional case

        # Constructing the dataset
        train_factor = 0.50

        N = 10
        Nt = 2000
        N_train = int(train_factor * Nt)

        x = np.linspace(0, 1, N)
        t = np.linspace(0, 1, Nt)
        i = np.linspace(1, 10, N)
        j = np.linspace(1, 10, Nt)

        T, X = np.meshgrid(t, x, indexing='ij')
        J, I = np.meshgrid(j, i, indexing='ij')

        Z = np.sin(J * np.pi * T) * np.cos(I * np.pi * X)
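        # The same manufactured dataset and chronological split as in the
        # previous test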
        train_data = Z[:N_train, :]
        test_data = Z[N_train:, :]

        architecture = [50, 50, 50]

        test_setup = {
            'architecture': architecture,
            'dropouts_rates_list': [0, 0, 0],
            'learning_rate': 1e-05,
            'l2_reg': 1e-05,
            'activation_function': 'elu',
            'loss_function': 'mse',
            'optimizer': 'adam',
            'input_dim': N,
            'output_dim': N
        }

        neural_net = DenseNetwork(architecture=architecture,
                                  config=test_setup,
                                  concat_input_tensor=True,
                                  concat_output_tensor=True)
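        # With concat_input_tensor and concat_output_tensor enabled, the
        # network handles a single concatenated output tensor, so one residual
        # and one loss entry suffice below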
        optimizers_list = {"Adam": {"maxiter": 2000}}

        residuals_type = ['surrogate']
        losses = ['square-mean']

        # Fitting a regression object requires wrapping it in a ModelMaker
        # instance
        model = ModelMaker(regressions=[neural_net],  # The regression objects to be used
                           optimizers_list=optimizers_list,  # The optimizers to be used
                           residuals_type=residuals_type,  # The role of each term in the loss function
                           losses=losses,  # The loss type of each term
                           data_residuals=['output'],  # The residual names
                           regularizations=[{'l2_reg': 1e-5, 'l1_reg': 0}]  # The regularization penalties (optional)
                           )

        # The model receives lists of input and target arrays, which are then
        # distributed among the input and output variables of the regression
        # objects in the common environment
        model.fit(input_data_list=[train_data],
                  target_data_list=[train_data],
                  normalize_all=True,
                  shuffle=False)
        # Evaluating the trained surrogate on the unseen time window
        approximated_test_data = model.eval(input_cube=test_data)

        l2_norm = L2Norm()
        error = l2_norm(data=approximated_test_data,
                        reference_data=test_data,
                        relative_norm=True)

        print(f'The approximation error was {100 * error} %')

        self.errors.append(error)
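        # Minimal sanity check (an assumption added here, not part of the
        # original test): the relative error should at least be finite
        self.assertTrue(np.isfinite(error))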