```python
from fastai.vision.all import *
import numpy as np
from torch.nn.modules.loss import _Loss
import segmentation_models_pytorch as smp
from steel_segmentation.utils import get_train_df
from steel_segmentation.transforms import SteelDataBlock, SteelDataLoaders
```
Loss functions

Custom losses for the multi-label steel defect segmentation task, used to train the `segmentation_models_pytorch` models.
```python
path = Path("../data")
train_pivot = get_train_df(path=path, pivot=True)
block = SteelDataBlock(path)
dls = SteelDataLoaders(block, train_pivot, bs=8)
xb, yb = dls.one_batch()
print(xb.shape, xb.device)
print(yb.shape, yb.device)
```
torch.Size([8, 3, 224, 1568]) cuda:0
torch.Size([8, 4, 224, 1568]) cpu

Note that the image batch `xb` lives on the GPU while the target masks `yb` stay on the CPU; this is why the examples below evaluate the losses on `logits.detach().cpu()`.
= "cuda" if torch.cuda.is_available() else "cpu"
device device
'cuda'
```python
model = smp.Unet("resnet18", classes=4).to(device)
logits = model(xb)
probs = torch.sigmoid(logits)
preds = (probs > 0.5).float()
```
SoftDiceLoss
SoftDiceLoss ()
A differentiable (soft) version of the Dice loss for segmentation logits: the predictions are passed through a sigmoid and the loss is one minus the Dice coefficient between the resulting probabilities and the target masks.
```python
criterion = SoftDiceLoss()
criterion(logits.detach().cpu(), yb)
```
TensorImage(0.9883)
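The same computation as a minimal PyTorch sketch (the smoothing constant and the flattened reduction are assumptions, not necessarily the module's exact code):

```python
import torch

def soft_dice(logits, targets, smooth=1.0):
    probs = torch.sigmoid(logits)            # per-pixel probabilities
    p, t = probs.reshape(-1), targets.reshape(-1).float()
    intersection = (p * t).sum()
    # Smoothed Dice coefficient, defined even on empty masks.
    dice = (2.0 * intersection + smooth) / (p.sum() + t.sum() + smooth)
    return 1.0 - dice
```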
WeightedSoftDiceLoss
WeightedSoftDiceLoss (size_average=True, weight=[0.2, 0.8])
A soft Dice loss in which pixels are weighted by their target label through `weight` (default `[0.2, 0.8]`), so that background and defect pixels contribute unequally to the intersection and union terms.
```python
criterion = WeightedSoftDiceLoss()
criterion(logits.detach().cpu(), yb)
```
TensorMask(0.9471)
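One common way to realise the weighting (a sketch assuming `weight` scales pixels by their target label; the module's exact formula may differ):

```python
def weighted_soft_dice(logits, targets, weight=(0.2, 0.8), smooth=1.0):
    probs = torch.sigmoid(logits)
    t = targets.float()
    # Per-pixel weights: weight[0] on background, weight[1] on defect pixels.
    w = weight[0] * (1 - t) + weight[1] * t
    p, t, w = probs.reshape(-1), t.reshape(-1), w.reshape(-1)
    intersection = (w * p * t).sum()
    dice = (2 * intersection + smooth) / ((w * p).sum() + (w * t).sum() + smooth)
    return 1.0 - dice
```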
SoftBCEDiceLoss
SoftBCEDiceLoss (bce_pos_weight, size_average=True, dice_weights=[0.2, 0.8], loss_weights=[0.7, 0.3])
Combines a BCE-with-logits term (with `pos_weight=bce_pos_weight`) and the weighted soft Dice loss above: the two terms are mixed according to `loss_weights` (default 0.7 BCE + 0.3 Dice), with the Dice part weighted by `dice_weights`.
```python
criterion = SoftBCEDiceLoss(bce_pos_weight=1.5)
criterion(logits.detach().cpu(), yb)
```
TensorBase(0.7872)
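The combination itself is a weighted sum; a sketch building on the Dice sketches above (parameter names are illustrative):

```python
import torch.nn.functional as F

def soft_bce_dice(logits, targets, bce_pos_weight=1.5, loss_weights=(0.7, 0.3)):
    # BCE term, up-weighting positive (defect) pixels.
    bce = F.binary_cross_entropy_with_logits(
        logits, targets.float(), pos_weight=torch.tensor(bce_pos_weight))
    # Dice term from the weighted sketch above.
    dice = weighted_soft_dice(logits, targets)
    return loss_weights[0] * bce + loss_weights[1] * dice
```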
MultiClassesSoftBCEDiceLoss
MultiClassesSoftBCEDiceLoss (classes_num=4, size_average=True, dice_weights=[0.2, 0.8], bce_pos_weights=[2.0, 2.0, 1.0, 1.5], loss_weights=[0.7, 0.3], thresh=0.5)
Applies a `SoftBCEDiceLoss` per class channel, with a per-class BCE positive weight taken from `bce_pos_weights`, and reduces the per-class losses to a single scalar. The class also exposes `activation` (sigmoid over the logits) and `decodes` (reducing the per-class probability maps to a single mask per image) so it plugs into the fastai `Learner` conventions.
```python
criterion = MultiClassesSoftBCEDiceLoss()
loss = criterion(logits.detach().cpu(), yb)
loss
```
TensorBase(0.7833)
```python
criterion.decodes(logits.detach().cpu()).shape
```
torch.Size([8, 224, 1568])
```python
criterion.activation(logits.detach().cpu()).shape
```
torch.Size([8, 4, 224, 1568])
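A sketch of the per-class reduction and of the two helpers, building on the `soft_bce_dice` sketch above (the argmax in `decodes` is an assumption that matches the `[8, 224, 1568]` shape):

```python
def multi_soft_bce_dice(logits, targets, bce_pos_weights=(2.0, 2.0, 1.0, 1.5)):
    # One SoftBCEDiceLoss-style term per class channel, averaged over classes.
    losses = [soft_bce_dice(logits[:, c], targets[:, c], bce_pos_weight=w)
              for c, w in enumerate(bce_pos_weights)]
    return sum(losses) / len(losses)

probs = torch.sigmoid(logits)   # activation: [bs, 4, H, W] probability maps
mask  = probs.argmax(dim=1)     # decodes:    [bs, H, W] single mask per image
```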
For the TensorBoard callback we need this Learner callback to handle the step right after the prediction.
LossEnabler
LossEnabler (after_create=None, before_fit=None, before_epoch=None, before_train=None, before_batch=None, after_pred=None, after_loss=None, before_backward=None, after_cancel_backward=None, after_backward=None, before_step=None, after_cancel_step=None, after_step=None, after_cancel_batch=None, after_batch=None, after_cancel_train=None, after_train=None, before_validate=None, after_cancel_validate=None, after_validate=None, after_cancel_epoch=None, after_epoch=None, after_cancel_fit=None, after_fit=None)
Cast predictions and labels to `TensorBase` so the `smp.losses` functions can be computed.
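A minimal sketch of what such a callback can look like (an illustration of the casting idea, not necessarily the actual implementation):

```python
from fastai.vision.all import Callback, TensorBase

class LossEnablerSketch(Callback):
    "Cast predictions and targets to plain TensorBase before the loss is computed."
    def after_pred(self):
        # fastai tensor subclasses (TensorImage, TensorMask, ...) can break
        # the smp losses, so strip them back to TensorBase.
        self.learn.pred = TensorBase(self.pred)
        self.learn.yb = tuple(TensorBase(y) for y in self.yb)
```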
```python
dls.valid.bs
```
8