from fastai.vision.all import *
import numpy as np
from torch.nn.modules.loss import _Loss
import segmentation_models_pytorch as smp
from steel_segmentation.utils import get_train_df
from steel_segmentation.transforms import SteelDataBlock, SteelDataLoaders
path = Path("../data")
train_pivot = get_train_df(path=path, pivot=True)
block = SteelDataBlock(path)
dls = SteelDataLoaders(block, train_pivot, bs=8)
xb, yb = dls.one_batch()
print(xb.shape, xb.device)
print(yb.shape, yb.device)
torch.Size([8, 3, 224, 1568]) cuda:0
torch.Size([8, 4, 224, 1568]) cpu
device = "cuda" if torch.cuda.is_available() else "cpu"
device
'cuda'
model = smp.Unet("resnet18", classes=4).to(device)

logits = model(xb)             # raw per-class scores: [8, 4, 224, 1568]
probs = torch.sigmoid(logits)  # per-pixel, per-class probabilities
preds = (probs > 0.5).float()  # hard binary masks
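As a quick sanity check on the untrained network we can look at the fraction of pixels each of the 4 defect channels predicts as positive (an illustrative snippet, not part of the library):

preds.mean(dim=(0, 2, 3))  # fraction of positive pixels per defect class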

class SoftDiceLoss[source]

SoftDiceLoss() :: Module

Soft Dice loss for the segmentation masks: the logits are passed through a sigmoid and compared to the targets with a differentiable ("soft") version of the Dice coefficient, so the loss can be minimised directly by gradient descent.
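The exact implementation lives in the library, but the idea fits in a few lines. Below is a minimal sketch (the name SoftDiceSketch, the smooth term and the per-batch flattening are illustrative assumptions, not the library's exact code):

import torch
import torch.nn as nn

class SoftDiceSketch(nn.Module):
    """Illustrative soft Dice loss: 1 - mean soft Dice over the batch."""
    def __init__(self, smooth=1.0):
        super().__init__()
        self.smooth = smooth

    def forward(self, logits, targets):
        probs = torch.sigmoid(logits)
        # Flatten everything except the batch dimension
        probs = probs.view(probs.size(0), -1)
        targets = targets.view(targets.size(0), -1).float()
        inter = (probs * targets).sum(dim=1)
        union = probs.sum(dim=1) + targets.sum(dim=1)
        dice = (2 * inter + self.smooth) / (union + self.smooth)
        return 1 - dice.mean()

An untrained model overlaps the ground truth almost nowhere, so the Dice coefficient is near 0 and the loss near 1, which matches the value below.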

criterion = SoftDiceLoss()
criterion(logits.detach().cpu(), yb)
TensorImage(0.9883)

class WeightedSoftDiceLoss[source]

WeightedSoftDiceLoss(size_average=True, weight=[0.2, 0.8]) :: Module

Soft Dice loss with per-pixel weights that rebalance background and defect pixels, which are heavily imbalanced in these masks (with the default weight=[0.2, 0.8], background pixels presumably count for 0.2 and defect pixels for 0.8).
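A sketch of that weighting idea, building on the SoftDiceSketch above (again illustrative; the exact weighting scheme in the library may differ):

class WeightedSoftDiceSketch(nn.Module):
    """Illustrative weighted soft Dice: defect pixels count more than background."""
    def __init__(self, weight=(0.2, 0.8), smooth=1.0):
        super().__init__()
        self.weight, self.smooth = weight, smooth

    def forward(self, logits, targets):
        probs = torch.sigmoid(logits).view(logits.size(0), -1)
        targets = targets.view(targets.size(0), -1).float()
        # weight[0] on background pixels, weight[1] on defect pixels
        w = targets * (self.weight[1] - self.weight[0]) + self.weight[0]
        inter = (w * probs * targets).sum(dim=1)
        union = (w * probs).sum(dim=1) + (w * targets).sum(dim=1)
        dice = (2 * inter + self.smooth) / (union + self.smooth)
        return 1 - dice.mean()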

criterion = WeightedSoftDiceLoss()
criterion(logits.detach().cpu(), yb)
TensorMask(0.9471)

class SoftBCEDiceLoss[source]

SoftBCEDiceLoss(bce_pos_weight, size_average=True, dice_weights=[0.2, 0.8], loss_weights=[0.7, 0.3]) :: Module

A weighted combination of binary cross-entropy and the weighted soft Dice loss above: bce_pos_weight is the pos_weight applied to positive pixels in the BCE term, dice_weights is forwarded to the Dice term, and loss_weights (0.7 and 0.3 by default) mixes the two terms into a single scalar.
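Reading the combination off the signature defaults (so treat the exact mix as an assumption), a sketch looks like this:

class SoftBCEDiceSketch(nn.Module):
    """Illustrative mix of BCE-with-logits and weighted soft Dice."""
    def __init__(self, bce_pos_weight, dice_weights=(0.2, 0.8),
                 loss_weights=(0.7, 0.3)):
        super().__init__()
        self.bce = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(bce_pos_weight))
        self.dice = WeightedSoftDiceSketch(weight=dice_weights)
        self.loss_weights = loss_weights

    def forward(self, logits, targets):
        return (self.loss_weights[0] * self.bce(logits, targets.float())
                + self.loss_weights[1] * self.dice(logits, targets))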

criterion = SoftBCEDiceLoss(bce_pos_weight=1.5)
criterion(logits.detach().cpu(), yb)
TensorBase(0.7872)

class MultiClassesSoftBCEDiceLoss[source]

MultiClassesSoftBCEDiceLoss(classes_num=4, size_average=True, dice_weights=[0.2, 0.8], bce_pos_weights=[2.0, 2.0, 1.0, 1.5], loss_weights=[0.7, 0.3], thresh=0.5) :: Module

SoftBCEDiceLoss applied per defect class: each of the classes_num channels gets its own BCE pos_weight from bce_pos_weights, and the per-class losses are reduced to a single scalar. The class also exposes fastai-style activation and decodes methods (using thresh) to turn raw logits into probabilities and hard predictions, as shown below.
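A sketch under the assumption that the per-class losses are simply averaged (the actual reduction may differ):

class MultiClassesSketch(nn.Module):
    """Illustrative per-class BCE+Dice, averaged over the defect channels."""
    def __init__(self, classes_num=4, bce_pos_weights=(2.0, 2.0, 1.0, 1.5)):
        super().__init__()
        self.losses = nn.ModuleList(
            [SoftBCEDiceSketch(bce_pos_weight=w) for w in bce_pos_weights])

    def forward(self, logits, targets):
        per_class = [loss(logits[:, i], targets[:, i])
                     for i, loss in enumerate(self.losses)]
        return torch.stack(per_class).mean()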

criterion = MultiClassesSoftBCEDiceLoss()
loss = criterion(logits.detach().cpu(), yb)
loss
TensorBase(0.7833)
criterion.decodes(logits.detach().cpu())
torch.Size([8, 224, 1568])
criterion.activation(logits.detach().cpu()).shape
torch.Size([8, 4, 224, 1568])
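Judging by the shapes, activation maps the logits to per-class probabilities without changing the shape (a sigmoid), while decodes collapses the 4 channels into a single label map per image, which is what an argmax over the class dimension would produce. The equivalents below are inferred from the shapes, not from the source (the thresh parameter suggests a thresholding step may also be involved):

logits.detach().cpu().sigmoid().shape      # activation-like: [8, 4, 224, 1568]
logits.detach().cpu().argmax(dim=1).shape  # decodes-like:    [8, 224, 1568]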

To use these losses inside a fastai Learner (for example together with the TensorBoard callback), we need a Learner Callback that handles the step right after the prediction.

class LossEnabler[source]

LossEnabler(after_create=None, before_fit=None, before_epoch=None, before_train=None, before_batch=None, after_pred=None, after_loss=None, before_backward=None, before_step=None, after_cancel_step=None, after_step=None, after_cancel_batch=None, after_batch=None, after_cancel_train=None, after_train=None, before_validate=None, after_cancel_validate=None, after_validate=None, after_cancel_epoch=None, after_epoch=None, after_cancel_fit=None, after_fit=None) :: Callback

Cast predictions and labels to TensorBase to compute the smp.losses
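The one-line summary above says what it does; here is a minimal sketch of such a callback (the after_pred hook is the natural place for the cast, but the exact events and casts used by the library are assumptions):

class LossEnablerSketch(Callback):
    "Cast predictions and targets to TensorBase so the losses dispatch cleanly."
    def after_pred(self):
        self.learn.pred = TensorBase(self.pred)
        self.learn.yb = tuple(TensorBase(y) for y in self.yb)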

dls.valid.bs 
8