---
title: CamVid Demo
keywords: fastai
sidebar: home_sidebar
nb_path: "05_camvid.ipynb"
---
This small demo shows that misas is not at all limited to the domain of medical imaging. Medical imaging is currently the main use case for the authors of misas, however, so this demo is only a proof of concept. It should give you an idea of how to work with a new dataset and can be extended if users need it. The example builds on the CamVid segmentation example in the fastai documentation.
Please reach out via GitHub if you have problems using misas on your own dataset.
from fastai.vision.all import *
path = untar_data(URLs.CAMVID_TINY)
codes = np.loadtxt(path/'codes.txt', dtype=str)
codes
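The codes array maps the integer pixel values in the label masks to class names. For CamVid this gives 32 classes (indices 0-31), which is why the prediction plots further down use vmin=0 and vmax=31.
len(codes)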
fnames = get_image_files(path/"images")
def label_func(fn): return path/"labels"/f"{fn.stem}_P{fn.suffix}"
cam_fn = fnames[0]
mask_fn = label_func(fnames[0])
cam_img = lambda: Image.open(cam_fn).convert("RGB")
mask = lambda: Image.open(mask_fn).convert("I")
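Before building the dataloaders it is worth checking that label_func really pairs each image with its mask. The quick side-by-side plot below is not part of the original notebook; it only uses matplotlib and the two helpers defined above.
fig, axs = plt.subplots(1, 2, figsize=(8, 4))
axs[0].imshow(cam_img())             # camera frame
axs[0].set_title("image")
axs[1].imshow(mask(), cmap="tab20")  # integer-coded label mask
axs[1].set_title("mask")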
dls = SegmentationDataLoaders.from_label_func(
    path, bs=8, fnames=fnames, label_func=label_func, codes=codes
)
dls.show_batch(max_n=6)
learn = unet_learner(dls, resnet34)
learn.fine_tune(6)
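Six epochs of fine-tuning are plenty for a proof of concept on CAMVID_TINY. If you want to reuse the trained model with misas later without retraining, you can export the learner; the file name below is just an example.
learn.export("camvid_tiny_unet.pkl")                 # written relative to learn.path
# learn = load_learner(path/"camvid_tiny_unet.pkl")  # reload it in a later session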
learn.show_results(max_n=6, figsize=(7,8))
interp = SegmentationInterpretation.from_learner(learn)
interp.plot_top_losses(k=3)
camvid = DataBlock(blocks=(ImageBlock, MaskBlock(codes)),
                   get_items=get_image_files,
                   get_y=label_func,
                   splitter=RandomSplitter(),
                   batch_tfms=aug_transforms(size=(120,160)))
dls = camvid.dataloaders(path/"images", path=path, bs=8)
dls.show_batch(max_n=6)
plt.imshow(cam_img().resize((128,128)))
from misas.core import *
from misas.core import default_cmap
learn.prepareSize = lambda item: item.resize((128,128))
class Fastai_model:
    """Wrap a fastai Learner in the interface misas expects: prepareSize and predict."""
    def __init__(self, learner):
        self.trainedModel = learner
        self.resize128 = lambda x: x.resize((128, 128))
        self.trainedModel.remove_cbs(ProgressCallback)  # keep prediction output quiet

    def prepareSize(self, item):
        # bring inputs to the size the model should see
        return self.resize128(item)

    def predict(self, image):
        image = PILImage.create(np.array(image))
        output = self.trainedModel.predict(image)
        output = PILImage.create(output[0])       # decoded segmentation mask
        return Image.fromarray(np.array(output))  # plain PIL image with class indices
Cam_vid = Fastai_model(learn)
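As a quick sanity check (not part of the original notebook), confirm that the wrapper returns a plain PIL image whose pixel values are class indices into codes.
pred = Cam_vid.predict(Cam_vid.prepareSize(cam_img()))
print(pred.size, np.array(pred).min(), np.array(pred).max())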
plot_series(get_rotation_series(cam_img(), Cam_vid), vmax=31, vmin=0)
plot_series(get_zoom_series(cam_img(), Cam_vid), vmax=31, vmin=0, nrow=2)
found_classes = np.unique(np.array(Cam_vid.predict(cam_img())))
codes[found_classes]
result = eval_rotation_series(cam_img(), mask(), Cam_vid, components=codes)
When plotting the evaluation series, it makes sense to plot only the classes that actually occur in the image.
plot_eval_series(result[np.append("deg", codes[found_classes])])
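Since eval_rotation_series appears to return a pandas DataFrame with a deg column and one score column per class (that is how it is indexed above), you can also query it directly, for example to find the rotation angle at which each detected class scores worst (assuming higher scores are better):
result.set_index("deg")[codes[found_classes]].idxmin()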