---
title: Models utils
keywords: fastai
sidebar: home_sidebar
summary: "Utility functions used to build PyTorch timeseries models."
description: "Utility functions used to build PyTorch timeseries models."
nb_path: "nbs/100b_models.utils.ipynb"
---
{% raw %}
{% endraw %} {% raw %}
{% endraw %} {% raw %}

get_layers[source]

get_layers(model, cond=noop, full=True)

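As a minimal sketch (the small `nn.Sequential` model below is hypothetical and used only for illustration), `get_layers` walks the model and returns the modules that satisfy `cond`, for example the `is_bn` predicate defined further down this page:

# toy model, illustration only
tst = nn.Sequential(nn.Conv1d(3, 4, 3), nn.BatchNorm1d(4), nn.ReLU(), nn.Linear(4, 2))
get_layers(tst)                 # all layers (flattened when full=True)
get_layers(tst, cond=is_bn)     # only the BatchNorm layers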
{% endraw %} {% raw %}

is_layer[source]

is_layer(*args)

{% endraw %} {% raw %}

is_linear[source]

is_linear(l)

{% endraw %} {% raw %}

is_bn[source]

is_bn(l)

{% endraw %} {% raw %}

is_conv_linear[source]

is_conv_linear(l)

{% endraw %} {% raw %}

is_affine_layer[source]

is_affine_layer(l)

{% endraw %} {% raw %}

is_conv[source]

is_conv(l)

{% endraw %} {% raw %}

has_bias[source]

has_bias(l)

{% endraw %} {% raw %}

has_weight[source]

has_weight(l)

{% endraw %} {% raw %}

has_weight_or_bias[source]

has_weight_or_bias(l)

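These helpers are simple predicates over a module's type and parameters; `is_layer` is assumed here to build such a type-based predicate from one or more layer classes. A hedged sketch of the expected behaviour with plain torch.nn layers:

is_linear(nn.Linear(4, 2)), is_linear(nn.Conv1d(3, 4, 3))   # expected: (True, False)
is_bn(nn.BatchNorm1d(4))                                     # expected: True
is_conv(nn.Conv1d(3, 4, 3))                                  # expected: True
has_bias(nn.Conv1d(3, 4, 3, bias=False))                     # expected: False (bias is None)
has_weight(nn.ReLU())                                        # expected: False
is_layer(nn.Dropout)(nn.Dropout(0.5))                        # expected: True (predicate built from a layer type)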
{% endraw %} {% raw %}
{% endraw %} {% raw %}

check_bias[source]

check_bias(m, cond=noop, verbose=False)

{% endraw %} {% raw %}

check_weight[source]

check_weight(m, cond=noop, verbose=False)

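Both helpers inspect the layers selected by `cond` and report weight/bias statistics; with `verbose=True` they print one line per matching layer. A hedged sketch (the toy model is hypothetical):

tst = nn.Sequential(nn.Conv1d(3, 4, 3), nn.BatchNorm1d(4), nn.Linear(4, 2))
check_weight(tst, cond=is_conv, verbose=True)   # stats for the conv weights
check_bias(tst, cond=has_bias, verbose=True)    # stats for every layer that has a bias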
{% endraw %} {% raw %}
{% endraw %} {% raw %}

get_nf[source]

get_nf(m)

Get the number of features (nf) from the first linear layer in the model's head

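A hedged sketch, assuming a model whose head contains a linear layer (as tsai's *Plus architectures do):

from tsai.models.InceptionTimePlus import InceptionTimePlus
m = InceptionTimePlus(3, 2, seq_len=50)
get_nf(m)   # expected: 128 with default settings (width of the backbone output feeding the head)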
{% endraw %} {% raw %}
{% endraw %} {% raw %}

ts_splitter[source]

ts_splitter(m)

Split a model's parameters between body and head

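`ts_splitter` is meant to be passed as the `splitter` of a fastai `Learner` so that body and head end up in separate parameter groups (useful for discriminative learning rates and fine-tuning). A minimal, hedged sketch:

from tsai.models.InceptionTimePlus import InceptionTimePlus
m = InceptionTimePlus(3, 2, seq_len=50)
len(ts_splitter(m))   # expected: 2 parameter groups (body, head)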
{% endraw %} {% raw %}

transfer_weights[source]

transfer_weights(model, weights_path:Path, device:device=None, exclude_head:bool=True)

Utility function that makes it easy to transfer weights between models. Taken from Kerem Turgutlu's great self_supervised repository: https://github.com/KeremTurgutlu/self_supervised/blob/d87ebd9b4961c7da0efd6073c42782bbc61aaa2e/self_supervised/utils.py

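A hedged sketch (the checkpoint path below is a placeholder): matching weights are expected to be copied into `model`, with the head skipped when `exclude_head=True`.

from pathlib import Path
from tsai.models.InceptionTimePlus import InceptionTimePlus
model = InceptionTimePlus(3, 2, seq_len=50)
# 'models/pretrained_weights.pth' is a hypothetical checkpoint saved previously
transfer_weights(model, Path('models/pretrained_weights.pth'), exclude_head=True)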
{% endraw %} {% raw %}

build_ts_model[source]

build_ts_model(arch, c_in=None, c_out=None, seq_len=None, d=None, dls=None, device=None, verbose=False, pretrained=False, weights_path=None, exclude_head=True, cut=-1, init=None, arch_config={}, **kwargs)

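A minimal sketch, assuming one of the architectures shipped with tsai (InceptionTimePlus here): `build_ts_model` instantiates `arch` with the given `c_in`/`c_out`/`seq_len` (or infers them from `dls`) and moves the model to the selected device.

import torch
from tsai.models.InceptionTimePlus import InceptionTimePlus
model = build_ts_model(InceptionTimePlus, c_in=3, c_out=2, seq_len=128, device='cpu')
xb = torch.randn(16, 3, 128)
model(xb).shape   # expected: torch.Size([16, 2])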
{% endraw %} {% raw %}

build_tabular_model[source]

build_tabular_model(arch, dls, layers=None, emb_szs=None, n_out=None, y_range=None, device=None, arch_config={}, ps=None, embed_p=0.0, use_bn=True, bn_final=False, bn_cont=True, act_cls=ReLU(inplace=True), lin_first=True)

{% endraw %} {% raw %}

build_tsimage_model[source]

build_tsimage_model(arch, c_in=None, c_out=None, dls=None, pretrained=False, device=None, verbose=False, init=None, arch_config={}, p=0.0, n_out=1000, stem_szs=(32, 32, 64), widen=1.0, sa=False, act_cls=ReLU, ndim=2, ks=3, stride=2, groups=1, reduction=None, nh1=None, nh2=None, dw=False, g2=1, sym=False, norm_type=<NormType.Batch: 1>, pool=AvgPool, pool_first=True, padding=None, bias=None, bn_1st=True, transpose=False, xtra=None, bias_std=0.01, dilation:Union[int, Tuple[int, int]]=1, padding_mode:str='zeros', dtype=None)

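A hedged sketch, assuming a fastai vision architecture such as `xresnet34` (most of the long signature above is forwarded to it): the arch is instantiated with `c_in` input channels and `c_out` output classes.

from fastai.vision.models.xresnet import xresnet34
m = build_tsimage_model(xresnet34, c_in=3, c_out=6, device='cpu')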
{% endraw %} {% raw %}

count_parameters[source]

count_parameters(model, trainable=True)

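For example, an `nn.Conv1d(3, 4, 3)` layer has 4*3*3 = 36 weights plus 4 biases:

count_parameters(nn.Conv1d(3, 4, 3))   # expected: 40 trainable parameters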
{% endraw %} {% raw %}
{% endraw %} {% raw %}
from tsai.data.external import get_UCR_data
from tsai.data.features import get_ts_features
dsid = 'NATOPS'
X, y, splits = get_UCR_data(dsid, split_data=False)
ts_features_df = get_ts_features(X, y)
Feature Extraction: 100%|██████████| 40/40 [00:07<00:00,  5.51it/s]
{% endraw %} {% raw %}
from tsai.data.tabular import get_tabular_dls
from tsai.models.TabModel import TabModel
cat_names = None
cont_names = ts_features_df.columns[:-2]
y_names = 'target'
tab_dls = get_tabular_dls(ts_features_df, cat_names=cat_names, cont_names=cont_names, y_names=y_names, splits=splits)
tab_model = build_tabular_model(TabModel, dls=tab_dls)
b = first(tab_dls.train)
test_eq(tab_model(*b[:-1]).shape, (64,6))
{% endraw %} {% raw %}
a = 'MLSTM_FCN'
if sum([1 for v in ['RNN_FCN', 'LSTM_FCN', 'GRU_FCN', 'OmniScaleCNN', 'Transformer', 'mWDN'] if v in a]): print(1)
1
{% endraw %} {% raw %}

get_clones[source]

get_clones(module, N)

{% endraw %} {% raw %}
{% endraw %} {% raw %}
m = nn.Conv1d(3,4,3)
get_clones(m, 3)
ModuleList(
  (0): Conv1d(3, 4, kernel_size=(3,), stride=(1,))
  (1): Conv1d(3, 4, kernel_size=(3,), stride=(1,))
  (2): Conv1d(3, 4, kernel_size=(3,), stride=(1,))
)
{% endraw %} {% raw %}

split_model[source]

split_model(m)

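A hedged sketch, assuming the model exposes separable backbone and head sections (as tsai's *Plus architectures do); `split_model` is expected to return the two parts:

from tsai.models.InceptionTimePlus import InceptionTimePlus
m = InceptionTimePlus(3, 2, seq_len=50)
body, head = split_model(m)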
{% endraw %} {% raw %}
{% endraw %} {% raw %}

output_size_calculator[source]

output_size_calculator(m, c_in, seq_len)

{% endraw %} {% raw %}
{% endraw %} {% raw %}
c_in = 3
seq_len = 30
m = nn.Conv1d(3, 12, kernel_size=3, stride=2)
new_c_in, new_seq_len = output_size_calculator(m, c_in, seq_len)
test_eq((new_c_in, new_seq_len), (12, 14))
{% endraw %} {% raw %}

change_model_head[source]

change_model_head(model, custom_head, **kwargs)

Replaces a model's head with a custom head, as long as the model has head, head_nf, c_out and seq_len attributes

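A hedged sketch with a hypothetical `my_head` callable; `change_model_head` is expected to call it with the model's `head_nf`, `c_out` and `seq_len` and attach the result as the new head:

from tsai.models.InceptionTimePlus import InceptionTimePlus

def my_head(nf, c_out, seq_len):   # hypothetical custom head, illustration only
    return nn.Sequential(nn.AdaptiveAvgPool1d(1), nn.Flatten(), nn.Linear(nf, c_out))

model = InceptionTimePlus(3, 2, seq_len=50)
change_model_head(model, my_head)
model.head   # now the custom head defined above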
{% endraw %} {% raw %}
{% endraw %} {% raw %}

naive_forecaster[source]

naive_forecaster(o, split, horizon=1)

{% endraw %} {% raw %}

true_forecaster[source]

true_forecaster(o, split, horizon=1)

{% endraw %} {% raw %}
{% endraw %} {% raw %}
a = np.random.rand(20).cumsum()
split = np.arange(10, 20)
a, naive_forecaster(a, split, 1), true_forecaster(a, split, 1)
(array([0.94194527, 1.92805741, 2.91427376, 3.32037788, 3.66698214,
        3.9496654 , 4.08584875, 4.57943479, 4.69926643, 5.2446612 ,
        5.77533105, 6.62208335, 6.79110137, 6.93756308, 7.18032836,
        7.23031503, 7.92323579, 8.32851529, 8.99181289, 9.60863212]),
 array([5.2446612 , 5.77533105, 6.62208335, 6.79110137, 6.93756308,
        7.18032836, 7.23031503, 7.92323579, 8.32851529, 8.99181289]),
 array([5.77533105, 6.62208335, 6.79110137, 6.93756308, 7.18032836,
        7.23031503, 7.92323579, 8.32851529, 8.99181289, 9.60863212]))
{% endraw %}