# (C) Copyright IBM Corp. 2019, 2020, 2021, 2022.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# (C) Copyright IBM Corporation 2017, 2018, 2019
# U.S. Government Users Restricted Rights: Use, duplication or disclosure restricted
# by GSA ADP Schedule Contract with IBM Corp.
#
# Author: Leonardo P. Tizzei <ltizzei@br.ibm.com>
import contextlib
import json
import os
from unittest import TestCase

import h5py
import numpy as np

from simulai.batching import batchdomain_constructor
from simulai.io import BatchCopy, MapValid, Reshaper, Sampling, ScalerReshaper
from simulai.math.integration import RK4
from simulai.metrics import MeanEvaluation
from simulai.models import ModelMaker
from simulai.regression import DenseNetwork
from simulai.rom import IPOD, POD
from simulai.simulation import Pipeline
from simulai.utilities import make_temp_directory
from simulai.workflows import dataset_ipod, pipeline_projection_error
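
# Tests for simulai's Pipeline: POD/IPOD reduced-order models, HDF5-batched
# workflows, random sampling, and mini-batch construction.
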
class TestPipeline(TestCase):
    # Executes a projection operation using the given data preparer object
    def projection(self, data_preparer=None, mean_component=True):
n_components = 5
exec_data_interval = [0, 77]
project_data_interval = [77, 101]
        Nt = 101
        Ny = 5
        Nx = 7
shape = [Nt, Nx, Ny]
n_features = Nx * Ny
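        # Build three spatial modes (a1, a2, a3) and three temporal coefficients
        # (t1, t2, t3); each field below is a linear combination of these modes,
        # so a low-rank POD basis can represent the data exactly.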
a1 = np.reshape(np.cos(np.linspace(0, 1, n_features)), [1, -1])
a2 = np.reshape(np.cos(np.linspace(0, 2, n_features)), [1, -1])
a3 = np.reshape(np.cos(np.linspace(0, 3, n_features)), [1, -1])
t1 = np.reshape(np.linspace(0, 1, shape[0]), [shape[0], 1])
t2 = np.reshape(np.cos(np.linspace(0, 1, shape[0])), [shape[0], 1])
t3 = np.reshape(np.sin(np.linspace(0, 1, shape[0])), [shape[0], 1])
tt = t1 * a1 + t2 * a2 + t3 * a3 + a1
xx = 2 * (t1 * a1 + t2 * a2 + t3 * a3) + a2
yy = t2 * a1 + t3 * a2 + a3
T = np.reshape(tt, shape)
X = np.reshape(xx, shape)
Y = np.reshape(yy, shape)
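        # Pack the three fields into a structured (record) array with a trailing
        # channel axis, as expected by the channels_last pipeline.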
Z_array = np.core.records.fromarrays([T[..., None], X[..., None], Y[..., None]],
names=['T', 'X', 'Y'],
formats=['f8', 'f8', 'f8'])
variables_names = Z_array.dtype.names
train_data = Z_array[slice(*exec_data_interval)]
test_data = Z_array[slice(*project_data_interval)]
rom_config = {
'n_components': n_components,
'mean_component': mean_component
}
pipeline = Pipeline(stages=[('data_preparer', data_preparer),
('rom', POD(config=rom_config))],
channels_last=True)
pipeline.exec(input_data=train_data,
data_interval=[0, train_data.shape[0]])
projected = pipeline.project_data(data=test_data,
variables_list=variables_names)
if isinstance(pipeline.rom.data_mean, np.ndarray):
mean = pipeline.rom.data_mean
else:
mean = None
data_reshaped = pipeline.data_preparer.prepare_input_structured_data(data=train_data)
return projected, mean, data_reshaped
    def test_pipeline(self):
model_name = 'testing-model'
# Constructing data
N = 100
Nt = 50
lambd = 10
x = np.linspace(0, 1, N)
y = np.linspace(0, 1, N)
t = np.linspace(0, 1, Nt)
X, T, Y = np.meshgrid(x, t, y)
# Generic function U = exp(-lambd*t)*(x**2*cos(y) + x*y)
U = np.exp(-lambd * T) * (X ** 2 * np.cos(Y) + X * Y)
# Time derivative of U
U_t = -lambd * U
data = np.core.records.fromarrays([U, U_t], names='U, U_t', formats='f8, f8')[:, None]
n_batches = data.shape[0]
# Training data
train_data = data[:int(n_batches / 2), :, :, :]
# Testing data
test_data = data[int(n_batches / 2):, :, :, :]
input_data = np.core.records.fromarrays([train_data['U']], names='U', formats='f8')[:, None]
target_data = np.core.records.fromarrays([train_data['U_t']], names='U_t', formats='f8')[:, None]
# The initial state is used to execute a time-integrator
# as will be seen below
initial_state = input_data[-1:, ...]
n_components = 5
rom_config = {
'n_components': n_components
}
architecture = [50, 50, 50, 50, 50] # Hidden layers only
net_config = {
'dropouts_rates_list': [0, 0, 0, 0, 0],
'l2_reg': 1e-05,
'activation_function': 'elu',
'input_dim': n_components,
'output_dim': n_components
}
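        # The network maps reduced states to reduced time derivatives, hence
        # input_dim == output_dim == n_components.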
neural_net = DenseNetwork(architecture=architecture,
config=net_config)
optimizers_list = {"Adam": {"maxiter": 2000}}
residuals_type = neural_net.output_dim * ['surrogate']
losses = neural_net.output_dim * ['square-mean']
        # Fitting a regression object requires wrapping it with ModelMaker
        model = ModelMaker(regressions=[neural_net],  # The list of regression objects to be used.
                           optimizers_list=optimizers_list,  # The list of optimizers to be used.
                           residuals_type=residuals_type,  # The role of each term in the loss function
                           losses=losses,  # The type of loss function for each term
                           data_residuals=neural_net.outputs_names,  # The residual names
                           regularizations=[{'l2_reg': 1e-5, 'l1_reg': 0}]  # The regularization penalties (optional)
                           )
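        # Full workflow: reshape the structured data, reduce it with POD, and
        # regress the reduced dynamics with the dense network.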
stages = [
('data_preparer', Reshaper()),
('rom', POD(config=rom_config)),
('model', model)
]
pipeline = Pipeline(stages=stages)
extra_kwargs = {'initial_state': initial_state, 'epochs': 25, 'dt': 1, 'resolution': 1}
fit_kwargs = {'normalize_all': False}
pipeline.exec(input_data=input_data,
target_data=target_data,
reference_data=test_data,
extra_kwargs=extra_kwargs,
fit_kwargs=fit_kwargs)
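        # Persist the trained pipeline, then integrate the learned derivative
        # model in time with RK4, starting from initial_state.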
with make_temp_directory() as tmp_dir:
pipeline.save(save_path=tmp_dir, model_name=model_name)
output = pipeline.predict(post_process_op=RK4, extra_kwargs=extra_kwargs)
self.assertIsNotNone(output)
self.assertIsInstance(output, np.ndarray)
    def test_sampling_copy_mean_data_preparer_IPOD_pipeline(self):
n_components = 11
batch_size = 17
exec_data_interval = [0, 77]
project_data_interval = [77, 101]
sampling_choices_fraction = 0.90
        Nt = 101
        Ny = 5
        Nx = 7
x = np.linspace(0, 1, Nx)
y = np.linspace(2, 3, Ny)
t = np.linspace(4, 5, Nt)
T, X, Y = np.meshgrid(t, x, y, indexing='ij')
Z_array = np.core.records.fromarrays([T[..., None], X[..., None], Y[..., None]],
names=['T', 'X', 'Y'],
formats=['f8', 'f8', 'f8'])
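        # Run the same HDF5 workflow for each data preparer: dump the data to
        # disk, copy the train/test intervals in batches, sample, and fit an IPOD.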
data_preparers = [MapValid(config={}), Reshaper(channels_last=True), ScalerReshaper(channels_last=True)]
for data_preparer in data_preparers:
with contextlib.ExitStack() as stack:
with make_temp_directory() as tmp_dir:
                    test_data = os.path.join(tmp_dir, 'dataset.h5')
with h5py.File(test_data, 'w') as fp:
                        dataset = fp.create_dataset('data', shape=(Nt, Nx, Ny, 1), dtype=[('T', np.float64),
                                                                                          ('X', np.float64),
                                                                                          ('Y', np.float64)])
dataset[...] = Z_array
with h5py.File(test_data, 'r') as fp:
dataset = fp.get('data')
variables_names = list(dataset.dtype.names)
                        exec_dataset_file = os.path.join(tmp_dir, "exec_dataset_file.h5")
BatchCopy(channels_last=True).copy(data=dataset, data_interval=exec_data_interval, batch_size=batch_size, dump_path=exec_dataset_file)
fp_exec_dataset = stack.enter_context(h5py.File(exec_dataset_file, 'r'))
dataset_exec = fp_exec_dataset.get('data')
                        test_dataset_file = os.path.join(tmp_dir, "test_dataset_file.h5")
BatchCopy(channels_last=True).copy(data=dataset, data_interval=project_data_interval, batch_size=batch_size, dump_path=test_dataset_file)
fp_test_dataset = stack.enter_context(h5py.File(test_dataset_file, 'r'))
dataset_test = fp_test_dataset.get('data')
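                        # Randomly sample (with shuffling) a fraction of the exec
                        # snapshots and dump them to a separate HDF5 file.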
                        dump_sampler = os.path.join(tmp_dir, "exec_data_sampled.h5")
sampler = Sampling(choices_fraction=sampling_choices_fraction, shuffling=True)
sampler.prepare_input_structured_data(dataset_exec, batch_size=batch_size, dump_path=dump_sampler)
fp_sampled = stack.enter_context(h5py.File(dump_sampler, 'r'))
sampled_dataset = fp_sampled.get('data')
mean = MeanEvaluation()(dataset=sampled_dataset,
data_interval=[0, sampled_dataset.shape[0]],
batch_size=batch_size,
data_preparer=data_preparer)
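                        # The batchwise mean precomputed above is handed to IPOD
                        # via data_mean, since mean_component is enabled.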
rom_config = {
'n_components': n_components,
'mean_component': True
}
pipeline = Pipeline(stages=[('data_preparer', data_preparer),
('rom', IPOD(config=rom_config, data_mean=mean))],
channels_last=True)
pipeline.exec(input_data=sampled_dataset,
data_interval=[0, sampled_dataset.shape[0]],
batch_size=batch_size)
                        # The projected data can usually be held in memory
projected = pipeline.project_data(data=dataset_test,
data_interval=[0, dataset_test.shape[0]],
variables_list=variables_names,
batch_size=batch_size)
reconstructed = pipeline.reconstruct_data(data=projected,
data_interval=[0, dataset_test.shape[0]],
variables_list=variables_names,
batch_size=batch_size,
                                                                  dump_path=os.path.join(tmp_dir, "reconstruction.h5")
)
self.assertTrue(set(dataset.dtype.names) == set(reconstructed.dtype.names))
for n in dataset.dtype.names:
self.assertTrue(dataset_test[n][...].shape == reconstructed[n][...].shape)
self.assertTrue(True, 'Finished')
    def test_sampling_batch_indices_copy_mean_data_preparer_IPOD_pipeline(self):
n_components = 11
batch_size = 17
exec_data_interval = [0, 77]
project_data_interval = [77, 101]
sampling_choices_fraction = 0.90
        Nt = 101
        Ny = 5
        Nx = 7
x = np.linspace(0, 1, Nx)
y = np.linspace(2, 3, Ny)
t = np.linspace(4, 5, Nt)
T, X, Y = np.meshgrid(t, x, y, indexing='ij')
Z_array = np.core.records.fromarrays([T[..., None], X[..., None], Y[..., None]],
names=['T', 'X', 'Y'],
formats=['f8', 'f8', 'f8'])
data_preparers = [MapValid(config={}), Reshaper(channels_last=True)]
for data_preparer in data_preparers:
with contextlib.ExitStack() as stack:
with make_temp_directory() as tmp_dir:
                    test_data = os.path.join(tmp_dir, 'dataset.h5')
with h5py.File(test_data, 'w') as fp:
                        dataset = fp.create_dataset('data', shape=(Nt, Nx, Ny, 1), dtype=[('T', np.float64),
                                                                                          ('X', np.float64),
                                                                                          ('Y', np.float64)])
dataset[...] = Z_array
with h5py.File(test_data, 'r') as fp:
dataset = fp.get('data')
variables_names = list(dataset.dtype.names)
                        exec_dataset_file = os.path.join(tmp_dir, "exec_dataset_file.h5")
BatchCopy(channels_last=True).copy(data=dataset, data_interval=exec_data_interval, batch_size=batch_size, dump_path=exec_dataset_file)
fp_exec_dataset = stack.enter_context(h5py.File(exec_dataset_file, 'r'))
dataset_exec = fp_exec_dataset.get('data')
                        test_dataset_file = os.path.join(tmp_dir, "test_dataset_file.h5")
BatchCopy(channels_last=True).copy(data=dataset, data_interval=project_data_interval, batch_size=batch_size, dump_path=test_dataset_file)
fp_test_dataset = stack.enter_context(h5py.File(test_dataset_file, 'r'))
dataset_test = fp_test_dataset.get('data')
                        dump_sampler = os.path.join(tmp_dir, "exec_data_sampled.h5")
sampler = Sampling(choices_fraction=sampling_choices_fraction, shuffling=True)
sampler.prepare_input_structured_data(dataset_exec, batch_size=batch_size, dump_path=dump_sampler)
fp_sampled = stack.enter_context(h5py.File(dump_sampler, 'r'))
sampled_dataset = fp_sampled.get('data')
mean = MeanEvaluation()(dataset=sampled_dataset,
data_interval=[0, sampled_dataset.shape[0]],
batch_size=batch_size)
rom_config = {
'n_components': n_components,
'mean_component': True
}
pipeline = Pipeline(stages=[('data_preparer', data_preparer),
('rom', IPOD(config=rom_config, data_mean=mean))],
channels_last=True)
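                        # Unlike the previous test, exec consumes the full exec
                        # dataset and selects rows through the sampler's indices.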
pipeline.exec(input_data=dataset_exec,
batch_indices=sampler.indices,
batch_size=batch_size)
                        # The projected data can usually be held in memory
projected = pipeline.project_data(data=dataset_test,
data_interval=[0, dataset_test.shape[0]],
variables_list=variables_names,
batch_size=batch_size)
reconstructed = pipeline.reconstruct_data(data=projected,
data_interval=[0, dataset_test.shape[0]],
variables_list=variables_names,
batch_size=batch_size,
                                                                  dump_path=os.path.join(tmp_dir, "reconstruction.h5")
)
self.assertTrue(set(dataset.dtype.names) == set(reconstructed.dtype.names))
for n in dataset.dtype.names:
self.assertTrue(dataset_test[n][...].shape == reconstructed[n][...].shape)
self.assertTrue(True, 'Finished')
    def test_mini_batching(self):
n = 17
data_intervals = [[n, 2*n],
[n, 2 * n],
[n, 2 * n],
[0, n],
[0, n],
[0, n],
[0, 893],
[0, 893]
]
batch_sizes = [n,
int(n/2),
2*n,
n,
int(n / 2),
2 * n,
15,
893,
]
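        # For each (interval, batch size) pair, the constructed batches must
        # exactly cover the interval, start at its left end, and have
        # near-uniform sizes (pairwise difference of at most one).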
for data_interval, batch_size in zip(data_intervals, batch_sizes):
batches = batchdomain_constructor(data_interval, batch_size)
self.assertTrue(len(batches) > 0)
            self.assertTrue(sum([b[1] - b[0] for b in batches]) == data_interval[1] - data_interval[0], 'Batches must exactly cover the data interval')
self.assertTrue(batches[0][0] == data_interval[0])
b0 = batches[0][1] - batches[0][0]
for b in batches:
bs = b[1]-b[0]
self.assertTrue((bs >= batch_size) or (bs >= data_interval[1]-data_interval[0]))
self.assertTrue(abs(bs-b0) <= 1)
    def test_sampling_copy_mean_data_preparer_IPOD_pipeline_linear_comb(self):
n_components = 5
batch_size = 13
exec_data_interval = [0, 77]
project_data_interval = [77, 101]
sampling_choices_fraction = 0.90
        Nt = 101
        Ny = 5
        Nx = 7
shape = [Nt, Nx, Ny]
n_features = Nx * Ny
a1 = np.reshape(np.cos(np.linspace(0, 1, n_features)), [1, -1])
a2 = np.reshape(np.cos(np.linspace(0, 2, n_features)), [1, -1])
a3 = np.reshape(np.cos(np.linspace(0, 3, n_features)), [1, -1])
t1 = np.reshape(np.linspace(0, 1, shape[0]), [shape[0], 1])
t2 = np.reshape(np.cos(np.linspace(0, 1, shape[0])), [shape[0], 1])
t3 = np.reshape(np.sin(np.linspace(0, 1, shape[0])), [shape[0], 1])
tt = t1 * a1 + t2 * a2 + t3 * a3 + a1
xx = 2 * (t1 * a1 + t2 * a2 + t3 * a3) + a2
yy = t2 * a1 + t3 * a2 + a3
T = np.reshape(tt, shape)
X = np.reshape(xx, shape)
Y = np.reshape(yy, shape)
Z_array = np.core.records.fromarrays([T[..., None], X[..., None], Y[..., None]],
names=['T', 'X', 'Y'],
formats=['f8', 'f8', 'f8'])
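        # The fields span only the three modes a1, a2, a3, so a 5-component IPOD
        # (with mean removal) should reconstruct them to round-off.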
data_preparers = [MapValid(config={}), Reshaper(channels_last=True)]
for data_preparer in data_preparers:
with contextlib.ExitStack() as stack:
with make_temp_directory() as tmp_dir:
                    test_data = os.path.join(tmp_dir, 'dataset.h5')
with h5py.File(test_data, 'w') as fp:
                        dataset = fp.create_dataset('data', shape=(Nt, Nx, Ny, 1), dtype=[('T', np.float64),
                                                                                          ('X', np.float64),
                                                                                          ('Y', np.float64)])
dataset[...] = Z_array
with h5py.File(test_data, 'r') as fp:
dataset = fp.get('data')
variables_names = list(dataset.dtype.names)
                        exec_dataset_file = os.path.join(tmp_dir, "exec_dataset_file.h5")
BatchCopy(channels_last=True).copy(data=dataset, data_interval=exec_data_interval, batch_size=batch_size, dump_path=exec_dataset_file)
fp_exec_dataset = stack.enter_context(h5py.File(exec_dataset_file, 'r'))
dataset_exec = fp_exec_dataset.get('data')
                        test_dataset_file = os.path.join(tmp_dir, "test_dataset_file.h5")
BatchCopy(channels_last=True).copy(data=dataset, data_interval=project_data_interval, batch_size=batch_size, dump_path=test_dataset_file)
fp_test_dataset = stack.enter_context(h5py.File(test_dataset_file, 'r'))
dataset_test = fp_test_dataset.get('data')
                        dump_sampler = os.path.join(tmp_dir, "exec_data_sampled.h5")
sampler = Sampling(choices_fraction=sampling_choices_fraction, shuffling=True)
sampler.prepare_input_structured_data(dataset_exec, batch_size=batch_size, dump_path=dump_sampler)
fp_sampled = stack.enter_context(h5py.File(dump_sampler, 'r'))
sampled_dataset = fp_sampled.get('data')
mean = MeanEvaluation()(dataset=sampled_dataset,
data_interval=[0, sampled_dataset.shape[0]],
batch_size=batch_size,
data_preparer=data_preparer)
rom_config = {
'n_components': n_components,
'mean_component': True
}
pipeline = Pipeline(stages=[('data_preparer', data_preparer),
('rom', IPOD(config=rom_config, data_mean=mean))],
channels_last=True)
pipeline.exec(input_data=sampled_dataset,
data_interval=[0, sampled_dataset.shape[0]],
batch_size=batch_size)
                        # The projected data can usually be held in memory
projected = pipeline.project_data(data=dataset_test,
data_interval=[0, dataset_test.shape[0]],
variables_list=variables_names,
batch_size=batch_size)
                        reconstructed_file = os.path.join(tmp_dir, "reconstruction.h5")
pipeline.reconstruct_data(data=projected,
data_interval=[0, dataset_test.shape[0]],
variables_list=variables_names,
batch_size=batch_size,
                                                  dump_path=reconstructed_file
)
with h5py.File(reconstructed_file, 'r') as fpr:
reconstructed = fpr.get('reconstructed_data')
self.assertTrue(set(dataset.dtype.names) == set(reconstructed.dtype.names))
for n in dataset.dtype.names:
self.assertTrue(np.linalg.norm(np.reshape(dataset_test[...][n] - reconstructed[...][n], [-1]), np.inf) < 1e-10)
                        pipeline.save(save_path=tmp_dir, model_name='pipeline.pkl')  # test saving
with make_temp_directory() as dataset_ipod_tmp_dir:
reconstructed_dump_path = os.path.join(dataset_ipod_tmp_dir, 'reconstructed.h5')
reconstructed_error_path = os.path.join(dataset_ipod_tmp_dir, 'reconstructed_error.json')
dataset_ipod(data_path=test_data,
data_key='data',
train_interval=exec_data_interval,
sampling_fraction=sampling_choices_fraction,
save_path=dataset_ipod_tmp_dir,
n_components=n_components,
batch_size=batch_size,
reconstructed_dump_path=reconstructed_dump_path,
project_reconstruct_interval=project_data_interval,
error_norm_dump_path=reconstructed_error_path,
data_preparer=data_preparer)
with h5py.File(test_data, 'r') as fp:
dataset = fp.get('data')
with h5py.File(reconstructed_dump_path, 'r') as fpr:
reconstructed = fpr.get('reconstructed_data')
self.assertTrue(set(dataset.dtype.names) == set(reconstructed.dtype.names))
for n in dataset.dtype.names:
self.assertTrue(
np.linalg.norm(np.reshape(dataset[slice(*project_data_interval)][n] - reconstructed[...][n], [-1]),
np.inf) < 1e-10
)
with open(reconstructed_error_path, 'r') as fe:
error = json.load(fe)
for n in dataset.dtype.names:
for t in ['absolute', 'relative']:
for o in ['1', '2', 'inf']:
self.assertAlmostEqual(max(error[n][t][o]), 0.0)
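                                # With all 5 components the projection is exact;
                                # truncating to a single component must leave a
                                # visible reconstruction error.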
for n_components in [1, 5]:
dataset_name = os.path.splitext(os.path.basename(test_data))[0]
data_key = 'data'
pipeline_path = os.path.join(dataset_ipod_tmp_dir, f'pipeline_{dataset_name}_{data_key}.pkl')
projected_data_path = os.path.join(dataset_ipod_tmp_dir, f"time_series_{dataset_name}_{data_key}.npy")
pipeline_projection_error(
data_path=test_data,
key=data_key,
pipeline_path=pipeline_path,
projected_data_path=projected_data_path,
projected_data_interval=project_data_interval,
n_sub_components=n_components,
save_path=dataset_ipod_tmp_dir)
error_file = os.path.join(dataset_ipod_tmp_dir, f"error_reconstructed_{n_components}_{dataset_name}_{data_key}.json")
with open(error_file, 'r') as fe2:
ee = json.load(fe2)
for n in dataset.dtype.names:
for t in ['absolute', 'relative']:
for o in ['1', '2', 'inf']:
if n_components == 5:
self.assertAlmostEqual(max(ee[n][t][o]), 0.0)
else:
self.assertTrue(max(ee[n][t][o]) > 0.01)
self.assertTrue(True, 'Finished')
    def test_data_preparer_POD_pipeline_linear_comb(self):
n_components = 5
exec_data_interval = [0, 77]
project_data_interval = [77, 101]
        Nt = 101
        Ny = 5
        Nx = 7
shape = [Nt, Nx, Ny]
n_features = Nx * Ny
a1 = np.reshape(np.cos(np.linspace(0, 1, n_features)), [1, -1])
a2 = np.reshape(np.cos(np.linspace(0, 2, n_features)), [1, -1])
a3 = np.reshape(np.cos(np.linspace(0, 3, n_features)), [1, -1])
t1 = np.reshape(np.linspace(0, 1, shape[0]), [shape[0], 1])
t2 = np.reshape(np.cos(np.linspace(0, 1, shape[0])), [shape[0], 1])
t3 = np.reshape(np.sin(np.linspace(0, 1, shape[0])), [shape[0], 1])
tt = t1 * a1 + t2 * a2 + t3 * a3 + a1
xx = 2 * (t1 * a1 + t2 * a2 + t3 * a3) + a2
yy = t2 * a1 + t3 * a2 + a3
T = np.reshape(tt, shape)
X = np.reshape(xx, shape)
Y = np.reshape(yy, shape)
Z_array = np.core.records.fromarrays([T[..., None], X[..., None], Y[..., None]],
names=['T', 'X', 'Y'],
formats=['f8', 'f8', 'f8'])
variables_names = Z_array.dtype.names
train_data = Z_array[slice(*exec_data_interval)]
test_data = Z_array[slice(*project_data_interval)]
rom_config = {
'n_components': n_components,
'mean_component': True
}
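        # In-memory variant of the IPOD tests above: a plain POD fitted on data
        # held in RAM, without HDF5 batching.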
data_preparers = [MapValid(config={}), Reshaper(channels_last=True)]
for data_preparer in data_preparers:
pipeline = Pipeline(stages=[('data_preparer', data_preparer),
('rom', POD(config=rom_config))],
channels_last=True)
pipeline.exec(input_data=train_data,
data_interval=[0, train_data.shape[0]])
projected = pipeline.project_data(data=test_data,
variables_list=variables_names)
reconstructed = pipeline.reconstruct_data(data=projected,
variables_list=variables_names,
)
self.assertTrue(set(test_data.dtype.names) == set(reconstructed.dtype.names))
for n in test_data.dtype.names:
self.assertTrue(np.linalg.norm(np.reshape(test_data[...][n] - reconstructed[...][n], [-1]), np.inf) < 1e-10)
self.assertTrue(True, 'Finished')
    def test_if_projection_is_equal(self):
reshaper = Reshaper(channels_last=True)
map_valid = MapValid(config={})
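        # Since the synthetic data contains no invalid entries, Reshaper and
        # MapValid should produce identical projections (up to round-off).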
projected_reshaper, mean_reshaper, reshaped_reshaper = self.projection(data_preparer=reshaper)
projected_mapvalid, mean_mapvalid, reshaped_mapvalid = self.projection(data_preparer=map_valid)
self.assertTrue(np.linalg.norm(projected_mapvalid - projected_reshaper, np.inf) < 1e-12)