---
title: InceptionTimePlus
keywords: fastai
sidebar: home_sidebar
summary: "This is an unofficial PyTorch implementation of InceptionTime (Fawaz, 2019) created by Ignacio Oguiza."
description: "This is an unofficial PyTorch implementation of InceptionTime (Fawaz, 2019) created by Ignacio Oguiza."
nb_path: "nbs/102b_models.InceptionTimePlus.ipynb"
---
References: Ismail Fawaz, H., et al. (2019). "InceptionTime: Finding AlexNet for Time Series Classification."
# Sanity-check InceptionTimePlus: every configuration below should map an
# input of shape (bs, n_vars, seq_len) to an output of shape (bs, c_out).
bs, n_vars, seq_len, c_out = 16, 3, 51, 2
xb = torch.rand(bs, n_vars, seq_len)
for cfg in ({}, dict(concat_pool=True), dict(bottleneck=False), dict(residual=False),
            dict(conv_dropout=.5), dict(stoch_depth=.5)):
    test_eq(InceptionTimePlus(n_vars, c_out, **cfg)(xb).shape, [bs, c_out])
test_eq(InceptionTimePlus(n_vars, c_out, seq_len=seq_len, zero_norm=True, flatten=True)(xb).shape, [bs, c_out])
test_eq(InceptionTimePlus(n_vars, c_out, coord=True, separable=True, norm='Instance', zero_norm=True,
                          bn_1st=False, fc_dropout=.5, sa=True, se=True, act=nn.PReLU,
                          act_kwargs={})(xb).shape, [bs, c_out])
test_eq(InceptionTimePlus(n_vars, c_out, coord=True, separable=True, norm='Instance', zero_norm=True,
                          bn_1st=False, act=nn.PReLU, act_kwargs={})(xb).shape, [bs, c_out])
# Parameter counts should be reproducible, and a single ks=40 should yield the
# same parameter count as the explicit kernel sizes [9, 19, 39].
test_eq(total_params(InceptionTimePlus(3, 2))[0], 455490)
test_eq(total_params(InceptionTimePlus(6, 2, **dict(coord=True, separable=True, zero_norm=True)))[0], 77204)
test_eq(total_params(InceptionTimePlus(3, 2, ks=40))[0], total_params(InceptionTimePlus(3, 2, ks=[9, 19, 39]))[0])
# Inspect the model's backbone/head decomposition and its weight initialization.
bs, n_vars, seq_len, c_out = 16, 3, 51, 2
xb = torch.rand(bs, n_vars, seq_len)
model = InceptionTimePlus(n_vars, c_out)
model(xb).shape
# model[0] is the backbone and model[1] the head; both are reachable by name too
test_eq(model[0](xb), model.backbone(xb))
test_eq(model[1](model[0](xb)), model.head(model[0](xb)))
test_eq(model[1].state_dict().keys(), model.head.state_dict().keys())
# ts_splitter produces two parameter groups (backbone / head)
test_eq(len(ts_splitter(model)), 2)
# with zero_norm=True, conv biases sum to 0 and some batchnorm weights are
# initialized to 0 (sum 6 instead of 8 all-ones)
test_eq(check_bias(InceptionTimePlus(2, 3, zero_norm=True), is_conv)[0].sum(), 0)
test_eq(check_weight(InceptionTimePlus(2, 3, zero_norm=True), is_bn)[0].sum(), 6)
test_eq(check_weight(InceptionTimePlus(2, 3), is_bn)[0], np.array([1., 1., 1., 1., 1., 1., 1., 1.]))
# stochastic depth should run cleanly on a deeper model
for _ in range(10):
    InceptionTimePlus(n_vars, c_out, stoch_depth=0.8, depth=9, zero_norm=True)(xb)
net = InceptionTimePlus(2, 3, coord=True, separable=True, zero_norm=True)
test_eq(check_weight(net, is_bn)[0], np.array([1., 1., 0., 1., 1., 0., 1., 1.]))
net
# Splitting channels into separate branches adds parameters but must not
# change the output shape.
bs, n_vars, seq_len, c_out = 16, 3, 51, 2
xb = torch.rand(bs, n_vars, seq_len)
n_params_split = count_parameters(MultiInceptionTimePlus([1, 1, 1], c_out))
n_params_joint = count_parameters(MultiInceptionTimePlus(3, c_out))
test_eq(n_params_split > n_params_joint, True)
shape_split = MultiInceptionTimePlus([1, 1, 1], c_out).to(xb.device)(xb).shape
shape_joint = MultiInceptionTimePlus(3, c_out).to(xb.device)(xb).shape
test_eq(shape_split, shape_joint)
# Attach a custom multi-dimensional head producing a (5, 2) output grid per sample.
bs, n_vars, seq_len, c_out = 16, 3, 12, 10
xb = torch.rand(bs, n_vars, seq_len)
head_fn = partial(conv_lin_nd_head, d=(5, 2))
model = MultiInceptionTimePlus(n_vars, c_out, seq_len, custom_head=head_fn)
print(model.to(xb.device)(xb).shape)
model.head
# Feed 6 input channels split across branches of 1, 2 and 3 channels.
bs, n_vars, seq_len, c_out = 16, 6, 12, 2
xb = torch.rand(bs, n_vars, seq_len)
model = MultiInceptionTimePlus([1, 2, 3], c_out, seq_len)
print(model.to(xb.device)(xb).shape)
model.head
# Channel splits may be given either as branch sizes or as explicit index lists
# (indices may repeat across branches); both produce the same output shape.
bs, c_in, c_out, seq_len = 8, 7, 2, 10  # c_in aka channels, features, variables, dimensions
xb2 = torch.randn(bs, c_in, seq_len)
by_size = MultiInceptionTimePlus([2, 5], c_out, seq_len)
by_index = MultiInceptionTimePlus([[0, 2, 5], [0, 1, 3, 4, 6]], c_out, seq_len)
test_eq(by_size.to(xb2.device)(xb2).shape, (bs, c_out))
test_eq(by_size.to(xb2.device)(xb2).shape, by_index.to(xb2.device)(xb2).shape)
# End-to-end check: build a ts_learner on the NATOPS dataset with InceptionTimePlus.
from tsai.data.all import *
from tsai.learner import ts_learner
dsid = 'NATOPS'
X, y, splits = get_UCR_data(dsid, split_data=False)
tfms = [None, [Categorize()]]
batch_tfms = TSStandardize()
dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)
learner = ts_learner(dls, InceptionTimePlus)
xb, yb = first(learner.dls.train)
test_eq(learner.model.to(xb.device)(xb).shape, (dls.bs, dls.c))
# freeze() should reduce the trainable parameter count without zeroing it
n_all = count_parameters(learner.model)
learner.freeze()
n_frozen = count_parameters(learner.model)
assert n_all > n_frozen > 0
n_all, n_frozen
# Same end-to-end check with MultiInceptionTimePlus and three channel branches.
from tsai.data.all import *
# ts_learner is already imported above
dsid = 'NATOPS'
X, y, splits = get_UCR_data(dsid, split_data=False)
tfms = [None, [Categorize()]]
batch_tfms = TSStandardize()
dls = get_ts_dls(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms)
learner = ts_learner(dls, MultiInceptionTimePlus, c_in=[4, 15, 5])
xb, yb = first(learner.dls.train)
test_eq(learner.model.to(xb.device)(xb).shape, (dls.bs, dls.c))
# freeze() should reduce the trainable parameter count without zeroing it
n_all = count_parameters(learner.model)
learner.freeze()
n_frozen = count_parameters(learner.model)
assert n_all > n_frozen > 0
n_all, n_frozen