From ff1a6e999ccd58e51f07b9dcdeaefb073961fcdf Mon Sep 17 00:00:00 2001
From: Yauheni Kachan <19803638+bagxi@users.noreply.github.com>
Date: Sat, 1 Feb 2020 12:17:13 +0300
Subject: [PATCH 01/12] update catalyst from 20.01 to 20.01.3, update scripts
---
configs/templates/binary.yml | 62 +++++++++++++++++++++++++++-
configs/templates/semantic.yml | 62 +++++++++++++++++++++++++++-
requirements/requirements-docker.txt | 1 +
requirements/requirements.txt | 2 +-
scripts/image2mask.py | 13 +++---
scripts/process_semantic_masks.py | 14 ++++---
scripts/utils.py | 24 -----------
setup.cfg | 2 +-
src/__init__.py | 16 ++++++-
src/callbacks/io.py | 16 +++----
src/callbacks/processing.py | 7 +++-
src/experiment.py | 41 ++----------------
src/runner.py | 16 -------
src/transforms.py | 62 ----------------------------
14 files changed, 174 insertions(+), 164 deletions(-)
delete mode 100644 scripts/utils.py
delete mode 100644 src/runner.py
delete mode 100644 src/transforms.py
diff --git a/configs/templates/binary.yml b/configs/templates/binary.yml
index b8ec192..0ba991a 100644
--- a/configs/templates/binary.yml
+++ b/configs/templates/binary.yml
@@ -1,9 +1,17 @@
+shared:
+ image_size: &image_size {{ image_size }}
+
model_params:
num_classes: {{ num_classes }}
args:
expdir: {{ expdir }}
+runner_params:
+ input_key: image
+ input_target_key: mask
+ output_key: logits
+
stages:
state_params:
@@ -14,11 +22,63 @@ stages:
num_workers: {{ num_workers }}
batch_size: {{ batch_size }}
per_gpu_scaling: True
- image_size: {{ image_size }}
in_csv_train: {{ dataset_path }}/dataset_train.csv
in_csv_valid: {{ dataset_path }}/dataset_valid.csv
datapath: {{ dataset_path }}
+ transform_params:
+ _key_value: True
+
+ train:
+ transform: A.Compose
+ transforms:
+ - &pre_transforms
+ transform: A.Compose
+ transforms:
+ - transform: A.LongestMaxSize
+ max_size: *image_size
+ - transform: A.PadIfNeeded
+ min_height: *image_size
+ min_width: *image_size
+ border_mode: 0 # cv2.BORDER_CONSTANT
+ value: 0
+ - &hard_transforms
+ transform: A.Compose
+ transforms:
+ - transform: A.ShiftScaleRotate
+ shift_limit: 0.1
+ scale_limit: 0.1
+ rotate_limit: 15
+ border_mode: 2 # cv2.BORDER_REFLECT
+ - transform: A.OneOf
+ transforms:
+ - transform: A.HueSaturationValue
+ - transform: A.ToGray
+ - transform: A.RGBShift
+ - transform: A.ChannelShuffle
+ - transform: A.RandomBrightnessContrast
+ brightness_limit: 0.5
+ contrast_limit: 0.5
+ - transform: A.RandomGamma
+ - transform: A.CLAHE
+ - transform: A.JpegCompression
+ quality_lower: 50
+ - &post_transforms
+ transform: A.Compose
+ transforms:
+ - transform: A.Normalize
+ - transform: C.ToTensor
+ valid:
+ transform: A.Compose
+ transforms:
+ - *pre_transforms
+ - *post_transforms
+ infer:
+ transform: A.Compose
+ transforms:
+ - *pre_transforms
+ - *post_transforms
+
criterion_params:
_key_value: True
diff --git a/configs/templates/semantic.yml b/configs/templates/semantic.yml
index 0d896aa..e499599 100644
--- a/configs/templates/semantic.yml
+++ b/configs/templates/semantic.yml
@@ -1,9 +1,17 @@
+shared:
+ image_size: &image_size {{ image_size }}
+
model_params:
num_classes: {{ num_classes }}
args:
expdir: {{ expdir }}
+runner_params:
+ input_key: image
+ input_target_key: mask
+ output_key: logits
+
stages:
state_params:
@@ -14,11 +22,63 @@ stages:
num_workers: {{ num_workers }}
batch_size: {{ batch_size }}
per_gpu_scaling: True
- image_size: {{ image_size }}
in_csv_train: {{ dataset_path }}/dataset_train.csv
in_csv_valid: {{ dataset_path }}/dataset_valid.csv
datapath: {{ dataset_path }}
+ transform_params:
+ _key_value: True
+
+ train:
+ transform: A.Compose
+ transforms:
+ - &pre_transforms
+ transform: A.Compose
+ transforms:
+ - transform: A.LongestMaxSize
+ max_size: *image_size
+ - transform: A.PadIfNeeded
+ min_height: *image_size
+ min_width: *image_size
+ border_mode: 0 # cv2.BORDER_CONSTANT
+ value: 0
+ - &hard_transforms
+ transform: A.Compose
+ transforms:
+ - transform: A.ShiftScaleRotate
+ shift_limit: 0.1
+ scale_limit: 0.1
+ rotate_limit: 15
+ border_mode: 2 # cv2.BORDER_REFLECT
+ - transform: A.OneOf
+ transforms:
+ - transform: A.HueSaturationValue
+ - transform: A.ToGray
+ - transform: A.RGBShift
+ - transform: A.ChannelShuffle
+ - transform: A.RandomBrightnessContrast
+ brightness_limit: 0.5
+ contrast_limit: 0.5
+ - transform: A.RandomGamma
+ - transform: A.CLAHE
+ - transform: A.JpegCompression
+ quality_lower: 50
+ - &post_transforms
+ transform: A.Compose
+ transforms:
+ - transform: A.Normalize
+ - transform: C.ToTensor
+ valid:
+ transform: A.Compose
+ transforms:
+ - *pre_transforms
+ - *post_transforms
+ infer:
+ transform: A.Compose
+ transforms:
+ - *pre_transforms
+ - *post_transforms
+
criterion_params:
_key_value: True
diff --git a/requirements/requirements-docker.txt b/requirements/requirements-docker.txt
index 8aecace..221f00d 100644
--- a/requirements/requirements-docker.txt
+++ b/requirements/requirements-docker.txt
@@ -1,4 +1,5 @@
albumentations==0.4.3
+segmentation-models-pytorch==0.1.0
# Used in scripts
jinja2
diff --git a/requirements/requirements.txt b/requirements/requirements.txt
index 719b4da..c3147d2 100644
--- a/requirements/requirements.txt
+++ b/requirements/requirements.txt
@@ -1,4 +1,4 @@
-catalyst[cv]==20.01
+catalyst[cv]==20.01.3
# Used in scripts
jinja2
diff --git a/scripts/image2mask.py b/scripts/image2mask.py
index 0deec7b..d596f51 100644
--- a/scripts/image2mask.py
+++ b/scripts/image2mask.py
@@ -5,7 +5,7 @@
import pandas as pd
-from utils import find_images_in_dir, id_from_fname
+from catalyst.utils import has_image_extension
def build_args(parser):
@@ -48,10 +48,13 @@ def main(args, _=None):
"""
samples = collections.defaultdict(dict)
for key in ("images", "masks"):
- for fname in find_images_in_dir(args.in_dir / key):
- fname = os.path.join(key, fname)
- sample_id = id_from_fname(fname)
- samples[sample_id].update({"name": sample_id, key: fname})
+ for filename in (args.in_dir / key).iterdir():
+ if has_image_extension(str(filename)):
+ sample_id = filename.stem
+ samples[sample_id].update({
+ "name": sample_id,
+ key: str(filename),
+ })
dataframe = pd.DataFrame.from_dict(samples, orient="index")
diff --git a/scripts/process_semantic_masks.py b/scripts/process_semantic_masks.py
index e7b9e15..c9817dd 100644
--- a/scripts/process_semantic_masks.py
+++ b/scripts/process_semantic_masks.py
@@ -1,14 +1,14 @@
import argparse
-import os
+from multiprocessing.pool import Pool
from pathlib import Path
import numpy as np
import safitty
from catalyst.utils import (
- get_pool, imread, mimwrite_with_meta, Pool, tqdm_parallel_imap
+ get_pool, has_image_extension, imread, mimwrite_with_meta,
+ tqdm_parallel_imap
)
-from utils import id_from_fname
def build_args(parser):
@@ -58,7 +58,7 @@ def preprocess(self, image_path: Path):
np.all((image == self.index2color[index]), axis=-1), index
] = 255
- target_path = self.out_dir / f"{id_from_fname(image_path)}.tiff"
+ target_path = self.out_dir / f"{image_path.stem}.tiff"
target_path.parent.mkdir(parents=True, exist_ok=True)
mimwrite_with_meta(
@@ -66,7 +66,11 @@ def preprocess(self, image_path: Path):
)
def process_all(self, pool: Pool):
- images = os.listdir(self.in_dir)
+ images = [
+ filename
+ for filename in self.in_dir.iterdir()
+ if has_image_extension(str(filename))
+ ]
tqdm_parallel_imap(self.preprocess, images, pool)
diff --git a/scripts/utils.py b/scripts/utils.py
deleted file mode 100644
index ae24c75..0000000
--- a/scripts/utils.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import os
-
-from catalyst.utils import has_image_extension
-
-
-def find_in_dir(dirname: str, full_path: bool = False):
- result = [fname for fname in sorted(os.listdir(dirname))]
- if full_path:
- result = [os.path.join(dirname, fname) for fname in result]
-
- return result
-
-
-def find_images_in_dir(dirname: str, full_path: bool = False):
- result = [
- fname
- for fname in find_in_dir(dirname, full_path=full_path)
- if has_image_extension(fname)
- ]
- return result
-
-
-def id_from_fname(fname: str):
- return os.path.splitext(os.path.basename(fname))[0]
diff --git a/setup.cfg b/setup.cfg
index d3778e1..d7042ec 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -15,7 +15,7 @@ split_penalty_for_added_line_split = 300
# - dl libs (known_dl)
# - catalyst imports
known_typing = typing
-known_third_party = cv2,imageio,jinja2,numpy,pandas,safitty,skimage
+known_third_party = imageio,jinja2,numpy,pandas,safitty,skimage
known_dl = albumentations,torch,torchvision
known_first_party = catalyst,utils
sections=TYPING,STDLIB,THIRDPARTY,DL,FIRSTPARTY,LOCALFOLDER
diff --git a/src/__init__.py b/src/__init__.py
index 7553b3c..3754036 100644
--- a/src/__init__.py
+++ b/src/__init__.py
@@ -3,9 +3,21 @@
from catalyst.dl import registry
-# add experiment
from .experiment import Experiment
-from .runner import ModelRunner as Runner
+try:
+ import os
+
+ if os.environ.get("USE_ALCHEMY", "0") == "1":
+ from catalyst.contrib.dl import SupervisedAlchemyRunner as Runner
+ elif os.environ.get("USE_NEPTUNE", "0") == "1":
+ from catalyst.contrib.dl import SupervisedNeptuneRunner as Runner
+ elif os.environ.get("USE_WANDB", "0") == "1":
+ from catalyst.contrib.dl import SupervisedWandbRunner as Runner
+ else:
+ from catalyst.dl import SupervisedRunner as Runner
+except ImportError:
+ from catalyst.dl import SupervisedRunner as Runner
+
from . import callbacks
registry.CALLBACKS.add_from_module(callbacks)
diff --git a/src/callbacks/io.py b/src/callbacks/io.py
index 22aeb0a..d41abff 100644
--- a/src/callbacks/io.py
+++ b/src/callbacks/io.py
@@ -3,8 +3,7 @@
import imageio
import numpy as np
-from catalyst.dl import Callback, CallbackOrder, RunnerState
-from catalyst.utils.image import tensor_to_ndimage
+from catalyst.dl import Callback, CallbackOrder, State, utils
from .utils import mask_to_overlay_image
@@ -26,7 +25,7 @@ def __init__(
self.input_key = input_key
self.outpath_key = outpath_key
- def get_image_path(self, state: RunnerState, name: str, suffix: str = ""):
+ def get_image_path(self, state: State, name: str, suffix: str = ""):
if self.relative:
out_dir = Path(state.logdir) / self.output_dir
else:
@@ -38,10 +37,10 @@ def get_image_path(self, state: RunnerState, name: str, suffix: str = ""):
return res
- def on_batch_end(self, state: RunnerState):
+ def on_batch_end(self, state: State):
names = state.input[self.outpath_key]
images = state.input[self.input_key].cpu()
- images = tensor_to_ndimage(images, dtype=np.uint8)
+ images = utils.tensor_to_ndimage(images, dtype=np.uint8)
for image, name in zip(images, names):
fname = self.get_image_path(state, name, self.filename_suffix)
@@ -71,12 +70,15 @@ def __init__(
self.mask_strength = mask_strength
self.output_key = output_key
- def on_batch_end(self, state: RunnerState):
+ def on_batch_end(self, state: State):
names = state.input[self.outpath_key]
- images = tensor_to_ndimage(state.input[self.input_key].cpu())
+ images = utils.tensor_to_ndimage(state.input[self.input_key].cpu())
masks = state.output[self.output_key]
for name, image, mask in zip(names, images, masks):
image = mask_to_overlay_image(image, mask, self.mask_strength)
fname = self.get_image_path(state, name, self.filename_suffix)
imageio.imwrite(fname, image)
+
+
+__all__ = ["OriginalImageSaverCallback", "OverlayMaskImageSaverCallback"]
diff --git a/src/callbacks/processing.py b/src/callbacks/processing.py
index c676e5d..739ecaa 100644
--- a/src/callbacks/processing.py
+++ b/src/callbacks/processing.py
@@ -1,6 +1,6 @@
import torch
-from catalyst.dl import Callback, CallbackOrder, RunnerState
+from catalyst.dl import Callback, CallbackOrder, State
from .utils import encode_mask_with_color
@@ -16,10 +16,13 @@ def __init__(
self.input_key = input_key
self.output_key = output_key
- def on_batch_end(self, state: RunnerState):
+ def on_batch_end(self, state: State):
output: torch.Tensor = torch.sigmoid(
state.output[self.input_key].data.cpu()
).numpy()
state.output[self.output_key] = \
encode_mask_with_color(output, self.threshold)
+
+
+__all__ = ["RawMaskPostprocessingCallback"]
diff --git a/src/experiment.py b/src/experiment.py
index 7b022c6..b22c77e 100644
--- a/src/experiment.py
+++ b/src/experiment.py
@@ -1,8 +1,6 @@
import collections
import json
-import numpy as np
-
import torch
import torch.nn as nn
@@ -11,9 +9,6 @@
)
from catalyst.dl import ConfigExperiment
from catalyst.utils.pandas import read_csv_data
-from .transforms import (
- Compose, hard_transform, post_transforms, pre_transforms
-)
class Experiment(ConfigExperiment):
@@ -30,33 +25,6 @@ def _postprocess_model_for_stage(self, stage: str, model: nn.Module):
param.requires_grad = True
return model_
- @staticmethod
- def get_transforms(
- stage: str = None, mode: str = None, image_size: int = 256
- ):
- pre_transform_fn = pre_transforms(image_size=image_size)
-
- if mode == "train":
- post_transform_fn = Compose([
- hard_transform(image_size=image_size),
- post_transforms(),
- ])
- elif mode in ["valid", "infer"]:
- post_transform_fn = post_transforms()
- else:
- raise NotImplementedError()
-
- transform_fn = Compose([pre_transform_fn, post_transform_fn])
-
- def process(dict_):
- # cast to `float` prevent internal mask scaling in albumentations
- dict_["mask"] = dict_["mask"].astype(np.float32)
-
- result = transform_fn(**dict_)
- return result
-
- return process
-
def get_datasets(
self,
stage: str,
@@ -72,7 +40,6 @@ def get_datasets(
tag_column: str = None,
folds_seed: int = 42,
n_folds: int = 5,
- image_size: int = 256,
):
datasets = collections.OrderedDict()
tag2class = (
@@ -95,16 +62,16 @@ def get_datasets(
open_fn = ReaderCompose(readers=[
ImageReader(
- input_key="images", output_key="image", datapath=datapath
+ input_key="images", output_key="image", rootpath=datapath
),
MaskReader(
- input_key="masks", output_key="mask", datapath=datapath
+ input_key="masks", output_key="mask", rootpath=datapath
),
ScalarReader(
input_key="name",
output_key="name",
- default_value=-1,
dtype=str,
+ default_value=-1,
),
])
@@ -116,7 +83,7 @@ def get_datasets(
list_data=source,
open_fn=open_fn,
dict_transform=self.get_transforms(
- stage=stage, mode=mode, image_size=image_size
+ stage=stage, dataset=mode
),
)
diff --git a/src/runner.py b/src/runner.py
deleted file mode 100644
index 030c756..0000000
--- a/src/runner.py
+++ /dev/null
@@ -1,16 +0,0 @@
-try:
- import os
-
- if os.environ.get("USE_WANDB", "1") == "1":
- from catalyst.dl import SupervisedWandbRunner as Runner
- else:
- from catalyst.dl import SupervisedRunner as Runner
-except ImportError:
- from catalyst.dl import SupervisedRunner as Runner
-
-
-class ModelRunner(Runner):
- def __init__(self, model=None, device=None):
- super().__init__(
- model=model, device=device, input_key="image", output_key="logits"
- )
diff --git a/src/transforms.py b/src/transforms.py
deleted file mode 100644
index 45d8c28..0000000
--- a/src/transforms.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import cv2
-
-from albumentations import (
- ChannelShuffle, CLAHE, Compose, HueSaturationValue, IAAPerspective,
- JpegCompression, LongestMaxSize, Normalize, OneOf, PadIfNeeded,
- RandomBrightnessContrast, RandomGamma, RGBShift, ShiftScaleRotate, ToGray
-)
-from albumentations.pytorch import ToTensorV2
-import torch
-
-cv2.setNumThreads(0)
-cv2.ocl.setUseOpenCL(False)
-
-
-class ToTensor(ToTensorV2):
- """Convert image and mask to ``torch.Tensor``"""
-
- def apply_to_mask(self, mask, **params):
- return torch.from_numpy(mask.transpose(2, 0, 1))
-
-
-def pre_transforms(image_size: int = 256):
- """Transforms that always be applied before other transformations"""
- transforms = Compose([
- LongestMaxSize(max_size=image_size),
- PadIfNeeded(
- image_size, image_size, border_mode=cv2.BORDER_CONSTANT
- ),
- ])
- return transforms
-
-
-def post_transforms():
- """Transforms that always be applied after all other transformations"""
- return Compose([Normalize(), ToTensor()])
-
-
-def hard_transform(image_size: int = 256, p: float = 0.5):
- """Hard augmentations"""
- transforms = Compose([
- ShiftScaleRotate(
- shift_limit=0.1,
- scale_limit=0.1,
- rotate_limit=15,
- border_mode=cv2.BORDER_REFLECT,
- p=p,
- ),
- IAAPerspective(scale=(0.02, 0.05), p=p),
- OneOf([
- HueSaturationValue(p=p),
- ToGray(p=p),
- RGBShift(p=p),
- ChannelShuffle(p=p),
- ]),
- RandomBrightnessContrast(
- brightness_limit=0.5, contrast_limit=0.5, p=p
- ),
- RandomGamma(p=p),
- CLAHE(p=p),
- JpegCompression(quality_lower=50, p=p),
- ])
- return transforms
From 320aaa7bfa0b18f0c0f3ca11b9345f969b169368 Mon Sep 17 00:00:00 2001
From: Yauheni Kachan <19803638+bagxi@users.noreply.github.com>
Date: Sat, 1 Feb 2020 17:05:46 +0300
Subject: [PATCH 02/12] add instance segmentation pipeline from
catalyst-private
---
README.md | 115 ++++++++++
...catalyst-instance-segmentation-pipeline.sh | 136 ++++++++++++
configs/templates/instance.yml | 200 ++++++++++++++++++
requirements/requirements-docker.txt | 2 +
requirements/requirements.txt | 2 +
scripts/process_instance_masks.py | 190 +++++++++++++++++
setup.cfg | 2 +-
src/callbacks/__init__.py | 10 +-
src/callbacks/io.py | 42 +++-
src/callbacks/metrics.py | 84 ++++++++
src/callbacks/processing.py | 50 ++++-
src/callbacks/utils.py | 168 ++++++++++++++-
12 files changed, 990 insertions(+), 11 deletions(-)
create mode 100644 bin/catalyst-instance-segmentation-pipeline.sh
create mode 100644 configs/templates/instance.yml
create mode 100644 scripts/process_instance_masks.py
create mode 100644 src/callbacks/metrics.py
diff --git a/README.md b/README.md
index 90b9ab5..804c7ec 100644
--- a/README.md
+++ b/README.md
@@ -82,6 +82,12 @@ elif [[ "$DATASET" == "voc2012" ]]; then
tar -xf VOCtrainval_11-May-2012.tar &>/dev/null
mkdir -p ./data/origin/images/; mv VOCdevkit/VOC2012/JPEGImages/* $_
mkdir -p ./data/origin/raw_masks; mv VOCdevkit/VOC2012/SegmentationClass/* $_
+elif [[ "$DATASET" == "dsb2018" ]]; then
+ # instance segmentation
+ # https://www.kaggle.com/c/data-science-bowl-2018
+ download-gdrive 1RCqaQZLziuq1Z4sbMpwD_WHjqR5cdPvh dsb2018_cleared_191109.tar.gz
+ tar -xf dsb2018_cleared_191109.tar.gz &>/dev/null
+ mv dsb2018_cleared_191109 ./data/origin
fi
```
@@ -97,6 +103,11 @@ fi
#### Data structure
Make sure, that final folder with data has the required structure:
+
+
+Data structure for binary segmentation
+
+
```bash
/path/to/your_dataset/
images/
@@ -110,6 +121,66 @@ Make sure, that final folder with data has the required structure:
...
mask_N
```
+where each `mask` is a binary image
+
+
+
+
+
+Data structure for semantic segmentation
+
+
+```bash
+/path/to/your_dataset/
+ images/
+ image_1
+ image_2
+ ...
+ image_N
+ raw_masks/
+ mask_1
+ mask_2
+ ...
+ mask_N
+```
+where each `mask` is an image with classes encoded through colors, e.g. the [VOC2012](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/) dataset, where the `bicycle` class is encoded with green and `bird` with olive
+
+
+
+
+
+Data structure for instance segmentation
+
+
+```bash
+/path/to/your_dataset/
+ images/
+ image_1
+ image_2
+ ...
+ image_M
+ raw_masks/
+ mask_1/
+ instance_1
+ instance_2
+ ...
+ instance_N
+ mask_2/
+ instance_1
+ instance_2
+ ...
+ instance_K
+ ...
+ mask_M/
+ instance_1
+ instance_2
+ ...
+ instance_Z
+```
+where each `mask` is represented as a folder with instance images (one image per instance), and masks may consist of a different number of instances, e.g. the [Data Science Bowl 2018](https://www.kaggle.com/c/data-science-bowl-2018) dataset
+
+
+
#### Data location
@@ -234,6 +305,50 @@ docker run -it --rm --shm-size 8G --runtime=nvidia \
+
+Instance segmentation pipeline
+
+
+#### Run in local environment:
+
+```bash
+CUDA_VISIBLE_DEVICES=0 \
+CUDNN_BENCHMARK="True" \
+CUDNN_DETERMINISTIC="True" \
+WORKDIR=./logs \
+DATADIR=./data/origin \
+IMAGE_SIZE=256 \
+CONFIG_TEMPLATE=./configs/templates/instance.yml \
+NUM_WORKERS=4 \
+BATCH_SIZE=256 \
+bash ./bin/catalyst-instance-segmentation-pipeline.sh
+```
+
+#### Run in docker:
+
+```bash
+export LOGDIR=$(pwd)/logs
+docker run -it --rm --shm-size 8G --runtime=nvidia \
+ -v $(pwd):/workspace/ \
+ -v $LOGDIR:/logdir/ \
+ -v $(pwd)/data/origin:/data \
+ -e "CUDA_VISIBLE_DEVICES=0" \
+ -e "USE_WANDB=1" \
+ -e "LOGDIR=/logdir" \
+ -e "CUDNN_BENCHMARK='True'" \
+ -e "CUDNN_DETERMINISTIC='True'" \
+ -e "WORKDIR=/logdir" \
+ -e "DATADIR=/data" \
+ -e "IMAGE_SIZE=256" \
+ -e "CONFIG_TEMPLATE=./configs/templates/instance.yml" \
+ -e "NUM_WORKERS=4" \
+ -e "BATCH_SIZE=256" \
+ catalyst-segmentation ./bin/catalyst-instance-segmentation-pipeline.sh
+```
+
+
+
+
The pipeline is running and you don’t have to do anything else, it remains to wait for the best model!
#### Visualizations
diff --git a/bin/catalyst-instance-segmentation-pipeline.sh b/bin/catalyst-instance-segmentation-pipeline.sh
new file mode 100644
index 0000000..7b14b29
--- /dev/null
+++ b/bin/catalyst-instance-segmentation-pipeline.sh
@@ -0,0 +1,136 @@
+#!/usr/bin/env bash
+#title :catalyst-instance-segmentation-pipeline
+#description :catalyst.dl script for instance segmentation pipeline run
+#author :Sergey Kolesnikov, Yauheni Kachan
+#author_email :scitator@gmail.com, yauheni.kachan@gmail.com
+#date :20191109
+#version :19.11.1
+#==============================================================================
+
+# usage:
+# WORKDIR=/path/to/logdir \
+# DATADIR=/path/to/dataset \
+# IMAGE_SIZE=... \
+# CONFIG_TEMPLATE=... \ # model config to use
+# ./bin/catalyst-instance-segmentation-pipeline.sh
+
+# example:
+# CUDA_VISIBLE_DEVICES=0 \
+# CUDNN_BENCHMARK="True" \
+# CUDNN_DETERMINISTIC="True" \
+# WORKDIR=./logs \
+# DATADIR=./data/origin \
+# IMAGE_SIZE=256 \
+# CONFIG_TEMPLATE=./configs/templates/instance.yml \
+# NUM_WORKERS=4 \
+# BATCH_SIZE=256 \
+# ./bin/catalyst-instance-segmentation-pipeline.sh
+
+set -e
+
+# --- test part
+# uncomment and run bash ./bin/catalyst-instance-segmentation-pipeline.sh
+
+#mkdir -p ./data
+#download-gdrive 1RCqaQZLziuq1Z4sbMpwD_WHjqR5cdPvh dsb2018_cleared_191109.tar.gz
+#tar -xf dsb2018_cleared_191109.tar.gz &>/dev/null
+#mv dsb2018_cleared_191109 ./data/origin
+#
+#export CUDNN_BENCHMARK="True"
+#export CUDNN_DETERMINISTIC="True"
+#
+#export CONFIG_TEMPLATE=./configs/templates/instance.yml
+#export WORKDIR=./logs
+#export DATADIR=./data/origin
+#export NUM_WORKERS=4
+#export BATCH_SIZE=64
+#export IMAGE_SIZE=256
+
+# ---- environment variables
+
+if [[ -z "$NUM_WORKERS" ]]; then
+ NUM_WORKERS=4
+fi
+
+if [[ -z "$BATCH_SIZE" ]]; then
+ BATCH_SIZE=64
+fi
+
+if [[ -z "$IMAGE_SIZE" ]]; then
+ IMAGE_SIZE=256
+fi
+
+if [[ -z "$CONFIG_TEMPLATE" ]]; then
+ CONFIG_TEMPLATE="./configs/templates/instance.yml"
+fi
+
+if [[ -z "$DATADIR" ]]; then
+ DATADIR="./data/origin"
+fi
+
+if [[ -z "$WORKDIR" ]]; then
+ WORKDIR="./logs"
+fi
+
+SKIPDATA=""
+while getopts ":s" flag; do
+ case "${flag}" in
+ s) SKIPDATA="true" ;;
+ esac
+done
+
+date=$(date +%y%m%d-%H%M%S)
+postfix=$(openssl rand -hex 4)
+logname="$date-$postfix"
+export DATASET_DIR=$WORKDIR/dataset
+export RAW_MASKS_DIR=$DATASET_DIR/raw_masks
+export CONFIG_DIR=$WORKDIR/configs-${logname}
+export LOGDIR=$WORKDIR/logdir-${logname}
+
+mkdir -p $WORKDIR
+mkdir -p $DATASET_DIR
+mkdir -p $CONFIG_DIR
+mkdir -p $LOGDIR
+
+# ---- data preparation
+
+if [[ -z "${SKIPDATA}" ]]; then
+ cp -R $DATADIR/* $DATASET_DIR/
+
+ mkdir -p $DATASET_DIR/masks
+ python scripts/process_instance_masks.py \
+ --in-dir $RAW_MASKS_DIR \
+ --out-dir $DATASET_DIR/masks \
+ --num-workers $NUM_WORKERS
+
+ python scripts/image2mask.py \
+ --in-dir $DATASET_DIR \
+ --out-dataset $DATASET_DIR/dataset_raw.csv
+
+ catalyst-data split-dataframe \
+ --in-csv $DATASET_DIR/dataset_raw.csv \
+ --n-folds=5 --train-folds=0,1,2,3 \
+ --out-csv=$DATASET_DIR/dataset.csv
+fi
+
+
+# ---- config preparation
+
+python ./scripts/prepare_config.py \
+ --in-template=$CONFIG_TEMPLATE \
+ --out-config=$CONFIG_DIR/config.yml \
+ --expdir=./src \
+ --dataset-path=$DATASET_DIR \
+ --num-classes=2 \
+ --num-workers=$NUM_WORKERS \
+ --batch-size=$BATCH_SIZE \
+ --image-size=$IMAGE_SIZE
+
+cp -r ./configs/_common.yml $CONFIG_DIR/_common.yml
+
+
+# ---- model training
+
+catalyst-dl run \
+ -C $CONFIG_DIR/_common.yml $CONFIG_DIR/config.yml \
+ --logdir $LOGDIR $*
diff --git a/configs/templates/instance.yml b/configs/templates/instance.yml
new file mode 100644
index 0000000..e26c62b
--- /dev/null
+++ b/configs/templates/instance.yml
@@ -0,0 +1,200 @@
+shared:
+ image_size: &image_size {{ image_size }}
+
+model_params:
+ num_classes: {{ num_classes }}
+
+args:
+ expdir: {{ expdir }}
+
+runner_params:
+ input_key: image
+ input_target_key: mask
+ output_key: logits
+
+stages:
+
+ state_params:
+ main_metric: &reduce_metric iou_hard
+ minimize_metric: False
+
+ data_params:
+ num_workers: {{ num_workers }}
+ batch_size: {{ batch_size }}
+ per_gpu_scaling: True
+ in_csv_train: {{ dataset_path }}/dataset_train.csv
+ in_csv_valid: {{ dataset_path }}/dataset_valid.csv
+ datapath: {{ dataset_path }}
+
+ transform_params:
+ _key_value: True
+
+ train:
+ transform: A.Compose
+ transforms:
+ - &pre_transforms
+ transform: A.Compose
+ transforms:
+ - transform: A.LongestMaxSize
+ max_size: *image_size
+ - transform: A.PadIfNeeded
+ min_height: *image_size
+ min_width: *image_size
+ border_mode: 0 # cv2.BORDER_CONSTANT
+ value: 0
+ - &hard_transforms
+ transform: A.Compose
+ transforms:
+ - transform: A.ShiftScaleRotate
+ shift_limit: 0.1
+ scale_limit: 0.1
+ rotate_limit: 15
+ border_mode: 2 # cv2.BORDER_REFLECT
+ - transform: A.OneOf
+ transforms:
+ - transform: A.HueSaturationValue
+ - transform: A.ToGray
+ - transform: A.RGBShift
+ - transform: A.ChannelShuffle
+ - transform: A.RandomBrightnessContrast
+ brightness_limit: 0.5
+ contrast_limit: 0.5
+ - transform: A.RandomGamma
+ - transform: A.CLAHE
+ - transform: A.JpegCompression
+ quality_lower: 50
+ - &post_transforms
+ transform: A.Compose
+ transforms:
+ - transform: A.Normalize
+ - transform: C.ToTensor
+ valid:
+ transform: A.Compose
+ transforms:
+ - *pre_transforms
+ - *post_transforms
+ infer:
+ transform: A.Compose
+ transforms:
+ - *pre_transforms
+ - *post_transforms
+
+ criterion_params:
+ _key_value: True
+
+ bce:
+ criterion: BCEWithLogitsLoss
+ dice:
+ criterion: DiceLoss
+ iou:
+ criterion: IoULoss
+
+ callbacks_params:
+ loss_bce:
+ callback: CriterionCallback
+ input_key: mask
+ output_key: logits
+ prefix: loss_bce
+ criterion_key: bce
+ multiplier: 1.0
+ loss_dice:
+ callback: CriterionCallback
+ input_key: mask
+ output_key: logits
+ prefix: loss_dice
+ criterion_key: dice
+ multiplier: 1.0
+ loss_iou:
+ callback: CriterionCallback
+ input_key: mask
+ output_key: logits
+ prefix: loss_iou
+ criterion_key: iou
+ multiplier: 1.0
+
+ loss_aggregator:
+ callback: CriterionAggregatorCallback
+ prefix: &aggregated_loss loss
+ loss_aggregate_fn: "mean" # or "sum"
+ multiplier: 1.0 # scale factor for the aggregated loss
+
+ raw_processor:
+ callback: RawMaskPostprocessingCallback
+ instance_extractor:
+ callback: InstanceMaskPostprocessingCallback
+ watershed_threshold: 0.9
+ mask_threshold: 0.8
+ output_key: instance_mask
+ out_key_semantic: semantic_mask
+ out_key_border: border_mask
+
+ iou_soft:
+ callback: IouCallback
+ input_key: mask
+ output_key: logits
+ prefix: iou_soft
+ iou_hard:
+ callback: IouCallback
+ input_key: mask
+ output_key: logits
+ prefix: iou_hard
+ threshold: 0.5
+
+ optimizer:
+ callback: OptimizerCallback
+ loss_key: *aggregated_loss
+ scheduler:
+ callback: SchedulerCallback
+ reduce_metric: *reduce_metric
+ saver:
+ callback: CheckpointCallback
+
+ # infer:
+ #
+ # data_params:
+ # num_workers: {{ num_workers }}
+ # batch_size: {{ batch_size }}
+ # per_gpu_scaling: True
+ # in_csv: null
+ # in_csv_train: null
+ # in_csv_valid: {{ dataset_path }}/dataset_valid.csv
+ # in_csv_infer: {{ dataset_path }}/dataset_train.csv
+ # datapath: {{ dataset_path }}
+ #
+ # callbacks_params:
+ # loader:
+ # callback: CheckpointCallback
+ #
+ # raw_processor:
+ # callback: RawMaskPostprocessingCallback
+ # instance_extractor:
+ # callback: InstanceMaskPostprocessingCallback
+ # watershed_threshold: 0.9
+ # mask_threshold: 0.8
+ # output_key: instance_mask
+ # out_key_semantic: semantic_mask
+ # out_key_border: border_mask
+ #
+ # image_saver:
+ # callback: OriginalImageSaverCallback
+ # output_dir: infer
+ # saver_mask:
+ # callback: OverlayMaskImageSaverCallback
+ # output_dir: infer
+ # filename_suffix: _01_raw_mask
+ # output_key: mask
+ # saver_semantic:
+ # callback: OverlayMaskImageSaverCallback
+ # output_dir: infer
+ # filename_suffix: _02_semantic_mask
+ # output_key: semantic_mask
+ # saver_border:
+ # callback: OverlayMaskImageSaverCallback
+ # output_dir: infer
+ # filename_suffix: _03_border_mask
+ # output_key: border_mask
+ # saver_instance:
+ # callback: OverlayMaskImageSaverCallback
+ # output_dir: infer
+ # filename_suffix: _04_instance_mask
+ # output_key: instance_mask
diff --git a/requirements/requirements-docker.txt b/requirements/requirements-docker.txt
index 221f00d..4c13d83 100644
--- a/requirements/requirements-docker.txt
+++ b/requirements/requirements-docker.txt
@@ -1,5 +1,7 @@
albumentations==0.4.3
+opencv-python>=4.1.1
segmentation-models-pytorch==0.1.0
+shapely[vectorized]==1.7.0
# Used in scripts
jinja2
diff --git a/requirements/requirements.txt b/requirements/requirements.txt
index c3147d2..896d0bc 100644
--- a/requirements/requirements.txt
+++ b/requirements/requirements.txt
@@ -1,4 +1,6 @@
catalyst[cv]==20.01.3
+opencv-python>=4.1.1
+shapely[vectorized]==1.7.0
# Used in scripts
jinja2
diff --git a/scripts/process_instance_masks.py b/scripts/process_instance_masks.py
new file mode 100644
index 0000000..86184b8
--- /dev/null
+++ b/scripts/process_instance_masks.py
@@ -0,0 +1,190 @@
+from typing import List # isort:skip
+import argparse
+from multiprocessing.pool import Pool
+from pathlib import Path
+
+import numpy as np
+from skimage import measure, morphology
+
+from catalyst.utils import (
+ get_pool, has_image_extension, imread, mimwrite_with_meta,
+ tqdm_parallel_imap
+)
+
+
+def build_args(parser):
+ parser.add_argument(
+ "--in-dir",
+ type=Path,
+ required=True,
+ help="Raw masks folder path"
+ )
+ parser.add_argument(
+ "--out-dir",
+ type=Path,
+ required=True,
+ help="Processed masks folder path"
+ )
+ parser.add_argument("--threshold", type=float, default=0.0)
+ parser.add_argument(
+ "--n-channels",
+ type=int,
+ choices={2, 3},
+ default=2,
+ help="Number of channels in output masks"
+ )
+ parser.add_argument(
+ "--num-workers",
+ default=1,
+ type=int,
+ help="Number of workers to parallel the processing"
+ )
+
+ return parser
+
+
+def parse_args():
+ parser = argparse.ArgumentParser()
+ build_args(parser)
+ args = parser.parse_args()
+ return args
+
+
+def mim_interaction(mim: List[np.ndarray], threshold: float = 0) -> np.ndarray:
+ result = np.zeros_like(mim[0], dtype=np.uint8)
+ result[np.stack(mim, axis=-1).max(axis=-1) > threshold] = 255
+ return result
+
+
+def mim_color_encode(
+ mim: List[np.ndarray], threshold: float = 0
+) -> np.ndarray:
+ result = np.zeros_like(mim[0], dtype=np.uint8)
+ for index, im in enumerate(mim, start=1):
+ result[im > threshold] = index
+
+ return result
+
+
+class Preprocessor:
+ def __init__(
+ self,
+ in_dir: Path,
+ out_dir: Path,
+ threshold: float = 0.0,
+ n_channels: int = 2,
+ ):
+ """
+ Args:
+ in_dir (Path): raw masks folder path, input folder structure
+ should be following:
+ in_path # dir with raw masks
+ |-- sample_1
+ | |-- instance_1
+ | |-- instance_2
+ | ..
+ | `-- instance_N
+                |-- sample_2
+ | |-- instance_1
+ | |-- instance_2
+ | ..
+ | `-- instance_K
+ ..
+ `-- sample_M
+ |-- instance_1
+ |-- instance_2
+ ..
+ `-- instance_Z
+ out_dir (Path): processed masks folder path, output folder
+ structure will be following:
+ out_path
+ |-- sample_1.tiff # image of shape HxWxN
+ |-- sample_2.tiff # image of shape HxWxK
+ ..
+ `-- sample_M.tiff # image of shape HxWxZ
+            threshold (float): pixel intensity threshold; mask pixels with values greater than this are treated as instance foreground
+ n_channels (int): number of channels in output masks,
+ see https://www.kaggle.com/c/data-science-bowl-2018/discussion/54741 # noqa: E501
+ """
+ self.in_dir = in_dir
+ self.out_dir = out_dir
+ self.threshold = threshold
+ self.n_channels = n_channels
+
+ def preprocess(self, sample: Path):
+ masks = [
+ imread(filename, grayscale=True, expand_dims=False)
+ for filename in sample.iterdir()
+ if has_image_extension(str(filename))
+ ]
+ labels = mim_color_encode(masks, self.threshold)
+
+ scaled_blobs = morphology.dilation(labels > 0, morphology.square(9))
+ watersheded_blobs = morphology.watershed(
+ scaled_blobs, labels, mask=scaled_blobs, watershed_line=True
+ ) > 0
+ watershed_lines = scaled_blobs ^ (watersheded_blobs)
+ scaled_watershed_lines = morphology.dilation(
+ watershed_lines, morphology.square(7)
+ )
+
+ props = measure.regionprops(labels)
+ max_area = max(p.area for p in props)
+
+ mask_without_borders = mim_interaction(masks, self.threshold)
+ borders = np.zeros_like(labels, dtype=np.uint8)
+ for y0 in range(labels.shape[0]):
+ for x0 in range(labels.shape[1]):
+ if not scaled_watershed_lines[y0, x0]:
+ continue
+
+ if labels[y0, x0] == 0:
+ if max_area > 4000:
+ sz = 6
+ else:
+ sz = 3
+ else:
+ if props[labels[y0, x0] - 1].area < 300:
+ sz = 1
+ elif props[labels[y0, x0] - 1].area < 2000:
+ sz = 2
+ else:
+ sz = 3
+
+ uniq = np.unique(labels[
+ max(0, y0 - sz):min(labels.shape[0], y0 + sz + 1),
+ max(0, x0 - sz):min(labels.shape[1], x0 + sz + 1),
+ ])
+ if len(uniq[uniq > 0]) > 1:
+ borders[y0, x0] = 255
+ mask_without_borders[y0, x0] = 0
+
+ if self.n_channels == 2:
+ mask = [mask_without_borders, borders]
+ elif self.n_channels == 3:
+ background = 255 - (mask_without_borders + borders)
+ mask = [mask_without_borders, borders, background]
+ else:
+ raise ValueError()
+
+ mimwrite_with_meta(
+ self.out_dir / f"{sample.stem}.tiff", mask, {"compress": 9}
+ )
+
+ def process_all(self, pool: Pool):
+ images = list(self.in_dir.iterdir())
+ tqdm_parallel_imap(self.preprocess, images, pool)
+
+
+def main(args, _=None):
+ args = args.__dict__
+ args.pop("command", None)
+ num_workers = args.pop("num_workers")
+
+ with get_pool(num_workers) as p:
+ Preprocessor(**args).process_all(p)
+
+
+if __name__ == "__main__":
+ args = parse_args()
+ main(args)
diff --git a/setup.cfg b/setup.cfg
index d7042ec..10576d2 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -15,7 +15,7 @@ split_penalty_for_added_line_split = 300
# - dl libs (known_dl)
# - catalyst imports
known_typing = typing
-known_third_party = imageio,jinja2,numpy,pandas,safitty,skimage
+known_third_party = cv2,imageio,jinja2,numpy,pandas,safitty,shapely,skimage
known_dl = albumentations,torch,torchvision
known_first_party = catalyst,utils
sections=TYPING,STDLIB,THIRDPARTY,DL,FIRSTPARTY,LOCALFOLDER
diff --git a/src/callbacks/__init__.py b/src/callbacks/__init__.py
index 62a7a82..dff5731 100644
--- a/src/callbacks/__init__.py
+++ b/src/callbacks/__init__.py
@@ -1,4 +1,10 @@
# flake8: noqa
-from .io import OriginalImageSaverCallback, OverlayMaskImageSaverCallback
-from .processing import RawMaskPostprocessingCallback
+from .io import (
+ InstanceCropSaverCallback, OriginalImageSaverCallback,
+ OverlayMaskImageSaverCallback
+)
+from .metrics import SegmentationMeanAPCallback
+from .processing import (
+ InstanceMaskPostprocessingCallback, RawMaskPostprocessingCallback
+)
diff --git a/src/callbacks/io.py b/src/callbacks/io.py
index d41abff..efaac70 100644
--- a/src/callbacks/io.py
+++ b/src/callbacks/io.py
@@ -4,7 +4,7 @@
import numpy as np
from catalyst.dl import Callback, CallbackOrder, State, utils
-from .utils import mask_to_overlay_image
+from .utils import crop_by_masks, mask_to_overlay_image
class OriginalImageSaverCallback(Callback):
@@ -81,4 +81,42 @@ def on_batch_end(self, state: State):
imageio.imwrite(fname, image)
-__all__ = ["OriginalImageSaverCallback", "OverlayMaskImageSaverCallback"]
+class InstanceCropSaverCallback(OriginalImageSaverCallback):
+ def __init__(
+ self,
+ output_dir: str,
+ relative: bool = True,
+ filename_extension: str = ".jpg",
+ input_key: str = "image",
+ output_key: str = "mask",
+ outpath_key: str = "name",
+ ):
+ super().__init__(
+ output_dir=output_dir,
+ relative=relative,
+ filename_suffix=filename_extension,
+ input_key=input_key,
+ outpath_key=outpath_key,
+ )
+ self.output_key = output_key
+
+ def on_batch_end(self, state: State):
+ names = state.input[self.outpath_key]
+ images = utils.tensor_to_ndimage(state.input[self.input_key].cpu())
+ masks = state.output[self.output_key]
+
+ for name, image, masks_ in zip(names, images, masks):
+ instances = crop_by_masks(image, masks_)
+
+ for index, crop in enumerate(instances):
+ filename = self.get_image_path(
+ state, name, suffix=f"_instance{index:02d}"
+ )
+ imageio.imwrite(filename, crop)
+
+
+__all__ = [
+ "OriginalImageSaverCallback",
+ "OverlayMaskImageSaverCallback",
+ "InstanceCropSaverCallback",
+]
diff --git a/src/callbacks/metrics.py b/src/callbacks/metrics.py
new file mode 100644
index 0000000..a6206f9
--- /dev/null
+++ b/src/callbacks/metrics.py
@@ -0,0 +1,84 @@
+import numpy as np
+
+from catalyst.dl import MetricCallback
+
+
+def compute_ious_single_image(predicted_mask, gt_instance_masks):
+ instance_ids = np.unique(predicted_mask)
+ n_gt_instaces = gt_instance_masks.shape[0]
+
+ all_ious = []
+
+ for id_ in instance_ids:
+ if id_ == 0:
+ # Skip background
+ continue
+
+ predicted_instance_mask = predicted_mask == id_
+
+ sum_ = (
+ predicted_instance_mask.reshape(1, -1) +
+ gt_instance_masks.reshape(n_gt_instaces, -1)
+ )
+
+ intersection = (sum_ == 2).sum(axis=1)
+ union = (sum_ > 0).sum(axis=1)
+
+ ious = intersection / union
+
+ all_ious.append(ious)
+
+ all_ious = np.array(all_ious).reshape((len(all_ious), n_gt_instaces))
+
+ return all_ious
+
+
+def map_from_ious(ious: np.ndarray, iou_thresholds: np.ndarray):
+ """
+ Args:
+ ious (np.ndarray): array of shape n_pred x n_gt
+        iou_thresholds (np.ndarray): IoU thresholds at which matches are scored
+ """
+ n_preds = ious.shape[0]
+
+ fn_at_ious = (
+ np.max(ious, axis=0, initial=0)[None, :] < iou_thresholds[:, None]
+ )
+ fn_at_iou = np.sum(fn_at_ious, axis=1, initial=0)
+
+ tp_at_ious = (
+ np.max(ious, axis=0, initial=0)[None, :] > iou_thresholds[:, None]
+ )
+ tp_at_iou = np.sum(tp_at_ious, axis=1, initial=0)
+
+ metric_at_iou = tp_at_iou / (n_preds + fn_at_iou)
+
+ return metric_at_iou.mean()
+
+
+def mean_average_precision(outputs, targets, iou_thresholds):
+ batch_metrics = []
+ for pred, gt in zip(outputs, targets):
+ ious = compute_ious_single_image(pred, gt.numpy())
+ batch_metrics.append(map_from_ious(ious, iou_thresholds))
+ return float(np.mean(batch_metrics))
+
+
+class SegmentationMeanAPCallback(MetricCallback):
+ def __init__(
+ self,
+ input_key: str = "imasks",
+ output_key: str = "instance_mask",
+ prefix: str = "mAP",
+ iou_thresholds=(0.5, 0.55, 0.6, 0.7, 0.75, 0.8, 0.9, 0.95),
+ ):
+ super().__init__(
+ prefix=prefix,
+ metric_fn=mean_average_precision,
+ input_key=input_key,
+ output_key=output_key,
+ iou_thresholds=np.array(iou_thresholds),
+ )
+
+
+__all__ = ["SegmentationMeanAPCallback"]
diff --git a/src/callbacks/processing.py b/src/callbacks/processing.py
index 739ecaa..a31a806 100644
--- a/src/callbacks/processing.py
+++ b/src/callbacks/processing.py
@@ -1,7 +1,7 @@
import torch
from catalyst.dl import Callback, CallbackOrder, State
-from .utils import encode_mask_with_color
+from .utils import encode_mask_with_color, label_instances
class RawMaskPostprocessingCallback(Callback):
@@ -25,4 +25,50 @@ def on_batch_end(self, state: State):
encode_mask_with_color(output, self.threshold)
-__all__ = ["RawMaskPostprocessingCallback"]
+class InstanceMaskPostprocessingCallback(Callback):
+ def __init__(
+ self,
+ watershed_threshold: float = 0.5,
+ mask_threshold: float = 0.5,
+ input_key: str = "logits",
+ output_key: str = "instance_mask",
+ out_key_semantic: str = None,
+ out_key_border: str = None,
+ ):
+ super().__init__(CallbackOrder.Internal)
+ self.watershed_threshold = watershed_threshold
+ self.mask_threshold = mask_threshold
+ self.input_key = input_key
+ self.output_key = output_key
+ self.out_key_semantic = out_key_semantic
+ self.out_key_border = out_key_border
+
+ def on_batch_end(self, state: State):
+ output: torch.Tensor = torch.sigmoid(state.output[self.input_key])
+
+ semantic, border = output.chunk(2, -3)
+
+ if self.out_key_semantic is not None:
+ state.output[self.out_key_semantic] = encode_mask_with_color(
+ semantic.data.cpu().numpy(), threshold=self.mask_threshold
+ )
+
+ if self.out_key_border is not None:
+ state.output[self.out_key_border] = (
+ border.data.cpu().squeeze(-3).numpy() >
+ self.watershed_threshold
+ )
+
+ state.output[self.output_key] = label_instances(
+ semantic,
+ border,
+ watershed_threshold=self.watershed_threshold,
+ instance_mask_threshold=self.mask_threshold,
+ downscale_factor=1,
+ )
+
+
+__all__ = [
+ "RawMaskPostprocessingCallback",
+ "InstanceMaskPostprocessingCallback",
+]
diff --git a/src/callbacks/utils.py b/src/callbacks/utils.py
index 6002c37..77c7551 100644
--- a/src/callbacks/utils.py
+++ b/src/callbacks/utils.py
@@ -1,22 +1,29 @@
-from typing import List # isort:skip
-
+from typing import List, Tuple, Union # isort:skip
+import cv2
import numpy as np
+from shapely.geometry import LinearRing, MultiPoint
from skimage.color import label2rgb
+from skimage.measure import label, regionprops
+from skimage.morphology import watershed
import torch
+import torch.nn.functional as F
+
+# types
+Point = Tuple[int, int]
+Quadrangle = Tuple[Point, Point, Point, Point]
def encode_mask_with_color(
semantic_masks: torch.Tensor, threshold: float = 0.5
) -> List[np.ndarray]:
"""
-
Args:
semantic_masks (torch.Tensor): semantic mask batch tensor
threshold (float): threshold for semantic masks
+
Returns:
List[np.ndarray]: list of semantic masks
-
"""
batch = []
for observation in semantic_masks:
@@ -38,3 +45,156 @@ def mask_to_overlay_image(
(image_with_overlay * 255).clip(0, 255).round().astype(np.uint8)
)
return image_with_overlay
+
+
+def label_instances(
+ semantic_masks: torch.Tensor,
+ border_masks: torch.Tensor,
+ watershed_threshold: float = 0.9,
+ instance_mask_threshold: float = 0.5,
+ downscale_factor: float = 4,
+ interpolation: str = "bilinear",
+) -> List[np.ndarray]:
+ """
+ Args:
+ semantic_masks (torch.Tensor): semantic mask batch tensor
+ border_masks (torch.Tensor): instance mask batch tensor
+ watershed_threshold (float): threshold for watershed markers
+ instance_mask_threshold (float): threshold for final instance masks
+ downscale_factor (float): mask downscaling factor
+ (to speed up processing)
+ interpolation (str): interpolation method
+
+ Returns:
+ List[np.ndarray]: list of labeled instance masks, one per batch item
+ """
+ bordered_masks = (semantic_masks - border_masks).clamp(min=0)
+
+ scaling = 1 / downscale_factor
+ semantic_masks, bordered_masks = (
+ F.interpolate(
+ mask.data.cpu(),
+ scale_factor=scaling,
+ mode=interpolation,
+ align_corners=False,
+ ).squeeze(-3).numpy() for mask in (semantic_masks, bordered_masks)
+ )
+
+ result: List[np.ndarray] = []
+ for semantic, bordered in zip(semantic_masks, bordered_masks):
+ watershed_marks = label(bordered > watershed_threshold, background=0)
+ instance_regions = watershed(-bordered, watershed_marks)
+
+ instance_regions[semantic < instance_mask_threshold] = 0
+
+ result.append(instance_regions)
+
+ return result
+
+
+def _is_ccw(vertices: np.ndarray):
+ return LinearRing(vertices * [[1, -1]]).is_ccw
+
+
+def get_rects_from_mask(
+ label_mask: np.ndarray, min_area_fraction=20
+) -> np.ndarray:
+ props = regionprops(label_mask)
+
+ total_h, total_w = label_mask.shape
+ total_area = total_h * total_w
+
+ result = []
+ for p in props:
+
+ if p.area / total_area < min_area_fraction:
+ continue
+
+ coords = p.coords
+ coords = coords[:, ::-1] # row, col -> col, row
+
+ rect = MultiPoint(coords).minimum_rotated_rectangle.exterior.coords
+
+ rect = np.array(rect)[:4].astype(np.int32)
+
+ if _is_ccw(rect):
+ rect = rect[::-1]
+
+ result.append(rect)
+
+ result = np.stack(result) if result else []
+
+ return result
+
+
+def perspective_crop(
+ image: np.ndarray,
+ crop_coords: Union[Quadrangle, np.ndarray],
+ output_wh: Tuple[int, int],
+ border_color: Tuple[int, int, int] = (255, 255, 255),
+):
+ width, height = output_wh
+ target_coords = ((0, 0), (width, 0), (width, height), (0, height))
+
+ transform_matrix = cv2.getPerspectiveTransform(
+ np.array(crop_coords, dtype=np.float32),
+ np.array(target_coords, dtype=np.float32),
+ )
+
+ result = cv2.warpPerspective(
+ image,
+ transform_matrix,
+ (width, height),
+ borderMode=cv2.BORDER_CONSTANT,
+ borderValue=(border_color),
+ )
+
+ return result
+
+
+def perspective_crop_keep_ratio(
+ image: np.ndarray,
+ vertices: np.ndarray,
+ output_size: int = -1,
+ border_color: Tuple[int, int, int] = (255, 255, 255),
+) -> np.ndarray:
+ """
+    Crop some quadrilateral from image keeping its aspect ratio
+
+ Args:
+ image (np.ndarray): image numpy array
+ vertices (np.ndarray): numpy array with quadrilateral vertices coords
+        output_size (int): maximum side length of the output image
+            (if -1, the side lengths computed from ``vertices`` are kept)
+        border_color (Tuple[int, int, int]): fill color for out-of-bounds areas
+
+ Returns:
+ np.ndarray: image crop
+ """
+ lenghts = np.linalg.norm(vertices - np.roll(vertices, -1, 0), axis=1)
+
+ len_ab, len_bc, len_cd, len_da = lenghts.tolist()
+
+ width = (len_ab + len_cd) / 2
+ height = (len_bc + len_da) / 2
+
+ if output_size > 0:
+ scale = output_size / max(width, height)
+ width, height = (dim * scale for dim in (width, height))
+
+ width, height = round(width), round(height)
+
+ crop = perspective_crop(image, vertices, (width, height), border_color)
+
+ return crop
+
+
+def crop_by_masks(
+ image: np.ndarray, mask: np.ndarray, image_size: int = 512
+) -> List[np.ndarray]:
+ crops = [
+ perspective_crop_keep_ratio(image, rect, image_size)
+ for rect in get_rects_from_mask(mask)
+ ]
+
+ return crops
From 63a1399d26c9aec9f72807f75c62da144a2377aa Mon Sep 17 00:00:00 2001
From: Yauheni Kachan <19803638+bagxi@users.noreply.github.com>
Date: Sat, 1 Feb 2020 17:06:29 +0300
Subject: [PATCH 03/12] update configs (add processor)
---
configs/templates/binary.yml | 3 +++
configs/templates/semantic.yml | 4 ++++
2 files changed, 7 insertions(+)
diff --git a/configs/templates/binary.yml b/configs/templates/binary.yml
index 0ba991a..cdfa7a0 100644
--- a/configs/templates/binary.yml
+++ b/configs/templates/binary.yml
@@ -118,6 +118,9 @@ stages:
loss_aggregate_fn: "mean" # or "sum"
multiplier: 1.0 # scale factor for the aggregated loss
+ raw_processor:
+ callback: RawMaskPostprocessingCallback
+
iou_soft:
callback: IouCallback
input_key: mask
diff --git a/configs/templates/semantic.yml b/configs/templates/semantic.yml
index e499599..386e8d2 100644
--- a/configs/templates/semantic.yml
+++ b/configs/templates/semantic.yml
@@ -118,6 +118,10 @@ stages:
loss_aggregate_fn: "mean" # or "sum"
multiplier: 1.0 # scale factor for the aggregated loss
+ raw_processor:
+ callback: RawMaskPostprocessingCallback
+ output_key: semantic_mask
+
iou_soft:
callback: IouCallback
input_key: mask
From 11f34b71c0ae267999692298cb5180b8179cc217 Mon Sep 17 00:00:00 2001
From: Yauheni Kachan <19803638+bagxi@users.noreply.github.com>
Date: Sat, 1 Feb 2020 17:07:35 +0300
Subject: [PATCH 04/12] add instance pipeline check
---
.travis.yml | 6 ++++++
bin/tests/_check_instance.sh | 41 ++++++++++++++++++++++++++++++++++++
2 files changed, 47 insertions(+)
create mode 100644 bin/tests/_check_instance.sh
diff --git a/.travis.yml b/.travis.yml
index a25f57a..e21a603 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -43,3 +43,9 @@ jobs:
install: *requirements_dl
script:
- bash ./bin/tests/_check_semantic.sh
+
+ - stage: Pipelines
+ name: "Pipelines - instance segmentation"
+ install: *requirements_dl
+ script:
+ - bash ./bin/tests/_check_instance.sh
diff --git a/bin/tests/_check_instance.sh b/bin/tests/_check_instance.sh
new file mode 100644
index 0000000..83fd3db
--- /dev/null
+++ b/bin/tests/_check_instance.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+set -e
+
+mkdir -p data
+
+download-gdrive 1RCqaQZLziuq1Z4sbMpwD_WHjqR5cdPvh dsb2018_cleared_191109.tar.gz
+tar -xf dsb2018_cleared_191109.tar.gz &>/dev/null
+mv dsb2018_cleared_191109 ./data/origin
+
+USE_WANDB=0 \
+CUDA_VISIBLE_DEVICES="" \
+CUDNN_BENCHMARK="True" \
+CUDNN_DETERMINISTIC="True" \
+WORKDIR=./logs \
+DATADIR=./data/origin \
+MAX_IMAGE_SIZE=256 \
+CONFIG_TEMPLATE=./configs/templates/instance.yml \
+NUM_WORKERS=0 \
+BATCH_SIZE=2 \
+bash ./bin/catalyst-instance-segmentation-pipeline.sh --check
+
+
+python -c """
+import pathlib
+from safitty import Safict
+
+folder = list(pathlib.Path('./logs/').glob('logdir-*'))[0]
+metrics = Safict.load(f'{folder}/checkpoints/_metrics.json')
+
+aggregated_loss = metrics.get('best', 'loss')
+iou_soft = metrics.get('best', 'iou_soft')
+iou_hard = metrics.get('best', 'iou_hard')
+
+print(aggregated_loss)
+print(iou_soft)
+print(iou_hard)
+
+assert aggregated_loss < 0.9
+assert iou_soft > 0.06
+assert iou_hard > 0.1
+"""
From 0dfcd05d335dd31ba3afbdbff835608054532d8a Mon Sep 17 00:00:00 2001
From: Yauheni Kachan <19803638+bagxi@users.noreply.github.com>
Date: Sat, 1 Feb 2020 17:08:18 +0300
Subject: [PATCH 05/12] temp fix for registry
---
src/__init__.py | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/src/__init__.py b/src/__init__.py
index 3754036..087d262 100644
--- a/src/__init__.py
+++ b/src/__init__.py
@@ -21,3 +21,8 @@
from . import callbacks
registry.CALLBACKS.add_from_module(callbacks)
+
+
+# @TODO: fix catalyst.contrib.registry as it doesn't load contrib.models.cv
+from catalyst.contrib.models.cv import segmentation as m
+registry.MODELS.add_from_module(m)
From cfec7c3ed1b909887ea6cf61fe27e726a3865aef Mon Sep 17 00:00:00 2001
From: Yauheni Kachan <19803638+bagxi@users.noreply.github.com>
Date: Mon, 3 Feb 2020 00:13:27 +0300
Subject: [PATCH 06/12] update configs
---
configs/_common.yml | 5 +++++
configs/templates/binary.yml | 5 -----
configs/templates/instance.yml | 5 -----
configs/templates/semantic.yml | 5 -----
4 files changed, 5 insertions(+), 15 deletions(-)
diff --git a/configs/_common.yml b/configs/_common.yml
index 49fe784..44b52b3 100644
--- a/configs/_common.yml
+++ b/configs/_common.yml
@@ -3,6 +3,11 @@ model_params:
arch: resnet18
pretrained: True
+runner_params:
+ input_key: image
+ input_target_key: mask
+ output_key: logits
+
stages:
# train head
diff --git a/configs/templates/binary.yml b/configs/templates/binary.yml
index cdfa7a0..644fd18 100644
--- a/configs/templates/binary.yml
+++ b/configs/templates/binary.yml
@@ -7,11 +7,6 @@ model_params:
args:
expdir: {{ expdir }}
-runner_params:
- input_key: image
- input_target_key: mask
- output_key: logits
-
stages:
state_params:
diff --git a/configs/templates/instance.yml b/configs/templates/instance.yml
index e26c62b..c9c865c 100644
--- a/configs/templates/instance.yml
+++ b/configs/templates/instance.yml
@@ -7,11 +7,6 @@ model_params:
args:
expdir: {{ expdir }}
-runner_params:
- input_key: image
- input_target_key: mask
- output_key: logits
-
stages:
state_params:
diff --git a/configs/templates/semantic.yml b/configs/templates/semantic.yml
index 386e8d2..e4c8b93 100644
--- a/configs/templates/semantic.yml
+++ b/configs/templates/semantic.yml
@@ -7,11 +7,6 @@ model_params:
args:
expdir: {{ expdir }}
-runner_params:
- input_key: image
- input_target_key: mask
- output_key: logits
-
stages:
state_params:
From 4898125610b2f1aa7d63e1a5915ad5d477bc0839 Mon Sep 17 00:00:00 2001
From: Yauheni Kachan <19803638+bagxi@users.noreply.github.com>
Date: Tue, 18 Feb 2020 00:25:50 +0300
Subject: [PATCH 07/12] update cli in instance pipeline
---
...catalyst-instance-segmentation-pipeline.sh | 208 +++++++++---------
bin/tests/_check_instance.sh | 20 +-
2 files changed, 119 insertions(+), 109 deletions(-)
diff --git a/bin/catalyst-instance-segmentation-pipeline.sh b/bin/catalyst-instance-segmentation-pipeline.sh
index fab5f7c..6f90e0a 100644
--- a/bin/catalyst-instance-segmentation-pipeline.sh
+++ b/bin/catalyst-instance-segmentation-pipeline.sh
@@ -7,130 +7,140 @@
#version :19.11.1
#==============================================================================
-# usage:
-# WORKDIR=/path/to/logdir \
-# DATADIR=/path/to/dataset \
-# IMAGE_SIZE=... \
-# CONFIG_TEMPLATE=... \ # model config to use
-# ./bin/catalyst-instance-segmentation-pipeline.sh
-
-# example:
-# CUDA_VISIBLE_DEVICES=0 \
-# CUDNN_BENCHMARK="True" \
-# CUDNN_DETERMINISTIC="True" \
-# WORKDIR=./logs \
-# DATADIR=./data/origin \
-# IMAGE_SIZE=256 \
-# CONFIG_TEMPLATE=./configs/templates/instance.yml \
-# NUM_WORKERS=4 \
-# BATCH_SIZE=256 \
-# ./bin/catalyst-instance-segmentation-pipeline.sh
-
set -e
-# --- test part
-# uncomment and run bash ./bin/catalyst-instance-segmentation-pipeline.sh
-
-#mkdir -p ./data
-#download-gdrive 1RCqaQZLziuq1Z4sbMpwD_WHjqR5cdPvh dsb2018_cleared_191109.tar.gz
-#tar -xf dsb2018_cleared_191109.tar.gz &>/dev/null
-#mv dsb2018_cleared_191109 ./data/origin
-#
-#export CUDNN_BENCHMARK="True"
-#export CUDNN_DETERMINISTIC="True"
-#
-#export CONFIG_TEMPLATE=./configs/templates/instance.yml
-#export WORKDIR=./logs
-#export DATADIR=./data/origin
-#export NUM_WORKERS=4
-#export BATCH_SIZE=64
-#export IMAGE_SIZE=256
-
-# ---- environment variables
-
-if [[ -z "$NUM_WORKERS" ]]; then
- NUM_WORKERS=4
-fi
-
-if [[ -z "$BATCH_SIZE" ]]; then
- BATCH_SIZE=64
-fi
+usage()
+{
+ cat << USAGE >&2
+Usage: $(basename "$0") [OPTION...] [catalyst-dl run args...]
+
+ -s, --skipdata Skip data preparation
+ -j, --num-workers NUM_WORKERS Number of data loading/processing workers
+ -b, --batch-size BATCH_SIZE Mini-batch size
+ --max-image-size MAX_IMAGE_SIZE Target size of images e.g. 256
+ --config-template CONFIG_TEMPLATE Model config to use
+ --datadir DATADIR
+ --workdir WORKDIR
+ catalyst-dl run args Execute \`catalyst-dl run\` with args
+
+Example:
+ CUDA_VISIBLE_DEVICES=0 \\
+ CUDNN_BENCHMARK="True" \\
+ CUDNN_DETERMINISTIC="True" \\
+ ./bin/catalyst-instance-segmentation-pipeline.sh \\
+ --workdir ./logs \\
+ --datadir ./data/origin \\
+ --max-image-size 256 \\
+ --config-template ./configs/templates/instance.yml \\
+ --num-workers 4 \\
+ --batch-size 256
+USAGE
+ exit 1
+}
-if [[ -z "$IMAGE_SIZE" ]]; then
- IMAGE_SIZE=256
-fi
-if [[ -z "$CONFIG_TEMPLATE" ]]; then
- CONFIG_TEMPLATE="./configs/templates/instance.yml"
-fi
-
-if [[ -z "$DATADIR" ]]; then
- DATADIR="./data/origin"
-fi
-
-if [[ -z "$WORKDIR" ]]; then
- WORKDIR="./logs"
-fi
+# ---- environment variables
+NUM_WORKERS=${NUM_WORKERS:=4}
+BATCH_SIZE=${BATCH_SIZE:=64}
+MAX_IMAGE_SIZE=${MAX_IMAGE_SIZE:=256}
+CONFIG_TEMPLATE=${CONFIG_TEMPLATE:="./configs/templates/instance.yml"}
+DATADIR=${DATADIR:="./data/origin"}
+WORKDIR=${WORKDIR:="./logs"}
SKIPDATA=""
-while getopts ":s" flag; do
- case "${flag}" in
- s) SKIPDATA="true" ;;
+_run_args=""
+while (( "$#" )); do
+ case "$1" in
+ -j|--num-workers)
+ NUM_WORKERS=$2
+ shift 2
+ ;;
+ -b|--batch-size)
+ BATCH_SIZE=$2
+ shift 2
+ ;;
+ --max-image-size)
+ MAX_IMAGE_SIZE=$2
+ shift 2
+ ;;
+ --config-template)
+ CONFIG_TEMPLATE=$2
+ shift 2
+ ;;
+ --datadir)
+ DATADIR=$2
+ shift 2
+ ;;
+ --workdir)
+ WORKDIR=$2
+ shift 2
+ ;;
+ -s|--skipdata)
+ SKIPDATA="true"
+ shift
+ ;;
+ -h|--help)
+ usage
+ ;;
+ *)
+ _run_args="${_run_args} $1"
+ shift
+ ;;
esac
done
date=$(date +%y%m%d-%H%M%S)
postfix=$(openssl rand -hex 4)
-logname="$date-$postfix"
-export DATASET_DIR=$WORKDIR/dataset
-export RAW_MASKS_DIR=$DATASET_DIR/raw_masks
-export CONFIG_DIR=$WORKDIR/configs-${logname}
-export LOGDIR=$WORKDIR/logdir-${logname}
+logname="${date}-${postfix}"
+export DATASET_DIR=${WORKDIR}/dataset
+export RAW_MASKS_DIR=${DATASET_DIR}/raw_masks
+export CONFIG_DIR=${WORKDIR}/configs-${logname}
+export LOGDIR=${WORKDIR}/logdir-${logname}
+
+for dir in ${WORKDIR} ${DATASET_DIR} ${CONFIG_DIR} ${LOGDIR}; do
+ mkdir -p ${dir}
+done
-mkdir -p $WORKDIR
-mkdir -p $DATASET_DIR
-mkdir -p $CONFIG_DIR
-mkdir -p $LOGDIR
# ---- data preparation
if [[ -z "${SKIPDATA}" ]]; then
- cp -R $DATADIR/* $DATASET_DIR/
-
- mkdir -p $DATASET_DIR/masks
- python scripts/process_instance_masks.py \
- --in-dir $RAW_MASKS_DIR \
- --out-dir $DATASET_DIR/masks \
- --num-workers $NUM_WORKERS
-
- python scripts/image2mask.py \
- --in-dir $DATASET_DIR \
- --out-dataset $DATASET_DIR/dataset_raw.csv
-
- catalyst-data split-dataframe \
- --in-csv $DATASET_DIR/dataset_raw.csv \
- --n-folds=5 --train-folds=0,1,2,3 \
- --out-csv=$DATASET_DIR/dataset.csv
+ cp -R ${DATADIR}/* ${DATASET_DIR}/
+
+ mkdir -p ${DATASET_DIR}/masks
+ python scripts/process_instance_masks.py \
+ --in-dir ${RAW_MASKS_DIR} \
+ --out-dir ${DATASET_DIR}/masks \
+ --num-workers ${NUM_WORKERS}
+
+ python scripts/image2mask.py \
+ --in-dir ${DATASET_DIR} \
+ --out-dataset ${DATASET_DIR}/dataset_raw.csv
+
+ catalyst-data split-dataframe \
+ --in-csv ${DATASET_DIR}/dataset_raw.csv \
+ --n-folds=5 --train-folds=0,1,2,3 \
+ --out-csv=${DATASET_DIR}/dataset.csv
fi
# ---- config preparation
python ./scripts/prepare_config.py \
- --in-template=$CONFIG_TEMPLATE \
- --out-config=$CONFIG_DIR/config.yml \
- --expdir=./src \
- --dataset-path=$DATASET_DIR \
- --num-classes=2 \
- --num-workers=$NUM_WORKERS \
- --batch-size=$BATCH_SIZE \
- --max-image-size=$IMAGE_SIZE
+ --in-template=${CONFIG_TEMPLATE} \
+ --out-config=${CONFIG_DIR}/config.yml \
+ --expdir=./src \
+ --dataset-path=${DATASET_DIR} \
+ --num-classes=2 \
+ --num-workers=${NUM_WORKERS} \
+ --batch-size=${BATCH_SIZE} \
+ --max-image-size=${MAX_IMAGE_SIZE}
-cp -r ./configs/_common.yml $CONFIG_DIR/_common.yml
+cp -r ./configs/_common.yml ${CONFIG_DIR}/_common.yml
# ---- model training
catalyst-dl run \
- -C $CONFIG_DIR/_common.yml $CONFIG_DIR/config.yml \
- --logdir $LOGDIR $*
+ -C ${CONFIG_DIR}/_common.yml ${CONFIG_DIR}/config.yml \
+ --logdir ${LOGDIR} ${_run_args}
diff --git a/bin/tests/_check_instance.sh b/bin/tests/_check_instance.sh
index 83fd3db..b868f24 100644
--- a/bin/tests/_check_instance.sh
+++ b/bin/tests/_check_instance.sh
@@ -1,23 +1,23 @@
#!/usr/bin/env bash
set -e
-mkdir -p data
+mkdir -p ./data
download-gdrive 1RCqaQZLziuq1Z4sbMpwD_WHjqR5cdPvh dsb2018_cleared_191109.tar.gz
tar -xf dsb2018_cleared_191109.tar.gz &>/dev/null
mv dsb2018_cleared_191109 ./data/origin
-USE_WANDB=0 \
CUDA_VISIBLE_DEVICES="" \
CUDNN_BENCHMARK="True" \
CUDNN_DETERMINISTIC="True" \
-WORKDIR=./logs \
-DATADIR=./data/origin \
-MAX_IMAGE_SIZE=256 \
-CONFIG_TEMPLATE=./configs/templates/instance.yml \
-NUM_WORKERS=0 \
-BATCH_SIZE=2 \
-bash ./bin/catalyst-instance-segmentation-pipeline.sh --check
+bash ./bin/catalyst-instance-segmentation-pipeline.sh \
+ --config-template ./configs/templates/instance.yml \
+ --workdir ./logs \
+ --datadir ./data/origin \
+ --num-workers 0 \
+ --batch-size 2 \
+ --max-image-size 256 \
+ --check
python -c """
@@ -36,6 +36,6 @@ print(iou_soft)
print(iou_hard)
assert aggregated_loss < 0.9
-assert iou_soft > 0.06
+assert iou_soft > 0.05
assert iou_hard > 0.1
"""
From a26cf6bddc2ad7815874a45cb613a643faf2d9df Mon Sep 17 00:00:00 2001
From: Yauheni Kachan <19803638+bagxi@users.noreply.github.com>
Date: Tue, 10 Mar 2020 17:57:29 +0300
Subject: [PATCH 08/12] (catalyst 20.03): update scripts
---
.pre-commit-config.yaml | 2 +-
Makefile | 2 +-
bin/catalyst-binary-segmentation-pipeline.sh | 2 +-
...catalyst-instance-segmentation-pipeline.sh | 2 +-
...catalyst-semantic-segmentation-pipeline.sh | 2 +-
.../check_codestyle.sh} | 0
bin/{_flake8.sh => codestyle/flake8.sh} | 0
bin/{_yapf.sh => codestyle/yapf.sh} | 0
.../{_check_binary.sh => check_binary.sh} | 0
.../{_check_instance.sh => check_instance.sh} | 19 ++++++++++++++++++-
.../{_check_semantic.sh => check_semantic.sh} | 0
teamcity/instance.sh | 9 +++++----
12 files changed, 28 insertions(+), 10 deletions(-)
rename bin/{_check_codestyle.sh => codestyle/check_codestyle.sh} (100%)
rename bin/{_flake8.sh => codestyle/flake8.sh} (100%)
rename bin/{_yapf.sh => codestyle/yapf.sh} (100%)
rename bin/tests/{_check_binary.sh => check_binary.sh} (100%)
rename bin/tests/{_check_instance.sh => check_instance.sh} (67%)
rename bin/tests/{_check_semantic.sh => check_semantic.sh} (100%)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index a2b11d8..a80b1e6 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -2,7 +2,7 @@
hooks:
- id: yapf
name: yapf
- entry: bash ./bin/_yapf.sh --all-in-place
+ entry: bash ./bin/codestyle/yapf.sh --all-in-place
language: system
files: \.py$
require_serial: true
diff --git a/Makefile b/Makefile
index 1d125d6..13759c8 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
.PHONY: check-style codestyle docker-build clean
check-style:
- bash ./bin/_check_codestyle.sh -s
+ bash ./bin/codestyle/check_codestyle.sh -s
codestyle:
pre-commit run
diff --git a/bin/catalyst-binary-segmentation-pipeline.sh b/bin/catalyst-binary-segmentation-pipeline.sh
index cf616df..9499dac 100755
--- a/bin/catalyst-binary-segmentation-pipeline.sh
+++ b/bin/catalyst-binary-segmentation-pipeline.sh
@@ -4,7 +4,7 @@
#author :Sergey Kolesnikov, Yauheni Kachan
#author_email :scitator@gmail.com, yauheni.kachan@gmail.com
#date :20191016
-#version :19.10.2
+#version :20.03
#==============================================================================
set -e
diff --git a/bin/catalyst-instance-segmentation-pipeline.sh b/bin/catalyst-instance-segmentation-pipeline.sh
index 6f90e0a..d4581d3 100644
--- a/bin/catalyst-instance-segmentation-pipeline.sh
+++ b/bin/catalyst-instance-segmentation-pipeline.sh
@@ -4,7 +4,7 @@
#author :Sergey Kolesnikov, Yauheni Kachan
#author_email :scitator@gmail.com, yauheni.kachan@gmail.com
#date :20191109
-#version :19.11.1
+#version :20.03
#==============================================================================
set -e
diff --git a/bin/catalyst-semantic-segmentation-pipeline.sh b/bin/catalyst-semantic-segmentation-pipeline.sh
index c93b319..2e4f0bc 100755
--- a/bin/catalyst-semantic-segmentation-pipeline.sh
+++ b/bin/catalyst-semantic-segmentation-pipeline.sh
@@ -4,7 +4,7 @@
#author :Sergey Kolesnikov, Yauheni Kachan
#author_email :scitator@gmail.com, yauheni.kachan@gmail.com
#date :20191016
-#version :19.10.2
+#version :20.03
#==============================================================================
set -e
diff --git a/bin/_check_codestyle.sh b/bin/codestyle/check_codestyle.sh
similarity index 100%
rename from bin/_check_codestyle.sh
rename to bin/codestyle/check_codestyle.sh
diff --git a/bin/_flake8.sh b/bin/codestyle/flake8.sh
similarity index 100%
rename from bin/_flake8.sh
rename to bin/codestyle/flake8.sh
diff --git a/bin/_yapf.sh b/bin/codestyle/yapf.sh
similarity index 100%
rename from bin/_yapf.sh
rename to bin/codestyle/yapf.sh
diff --git a/bin/tests/_check_binary.sh b/bin/tests/check_binary.sh
similarity index 100%
rename from bin/tests/_check_binary.sh
rename to bin/tests/check_binary.sh
diff --git a/bin/tests/_check_instance.sh b/bin/tests/check_instance.sh
similarity index 67%
rename from bin/tests/_check_instance.sh
rename to bin/tests/check_instance.sh
index b868f24..99b48e5 100644
--- a/bin/tests/_check_instance.sh
+++ b/bin/tests/check_instance.sh
@@ -1,12 +1,25 @@
#!/usr/bin/env bash
-set -e
+# Cause the script to exit if a single command fails
+set -eo pipefail -v
+
+
+################################### DATA ####################################
+rm -rf ./data
+
+# load the data
mkdir -p ./data
download-gdrive 1RCqaQZLziuq1Z4sbMpwD_WHjqR5cdPvh dsb2018_cleared_191109.tar.gz
tar -xf dsb2018_cleared_191109.tar.gz &>/dev/null
mv dsb2018_cleared_191109 ./data/origin
+
+################################ pipeline 00 ################################
+rm -rf ./logs
+
+
+################################ pipeline 01 ################################
CUDA_VISIBLE_DEVICES="" \
CUDNN_BENCHMARK="True" \
CUDNN_DETERMINISTIC="True" \
@@ -39,3 +52,7 @@ assert aggregated_loss < 0.9
assert iou_soft > 0.05
assert iou_hard > 0.1
"""
+
+
+################################ pipeline 99 ################################
+rm -rf ./logs
diff --git a/bin/tests/_check_semantic.sh b/bin/tests/check_semantic.sh
similarity index 100%
rename from bin/tests/_check_semantic.sh
rename to bin/tests/check_semantic.sh
diff --git a/teamcity/instance.sh b/teamcity/instance.sh
index ede0f3a..f414784 100644
--- a/teamcity/instance.sh
+++ b/teamcity/instance.sh
@@ -1,7 +1,8 @@
-echo "pip install -r requirements/requirements.txt"
+#!/usr/bin/env bash
+
+# Cause the script to exit if a single command fails
+set -eo pipefail -v
+
pip install -r requirements/requirements.txt
-echo "bash ./bin/tests/_check_instance.sh"
bash ./bin/tests/_check_instance.sh
-
-rm -rf ./data ./logs
From e6564294203e1c67302b1298edf67eedcb914e75 Mon Sep 17 00:00:00 2001
From: Yauheni Kachan <19803638+bagxi@users.noreply.github.com>
Date: Tue, 10 Mar 2020 18:50:50 +0300
Subject: [PATCH 09/12] (catalyst 20.03): update code
---
bin/tests/check_instance.sh | 2 +-
configs/templates/binary.yml | 3 ---
configs/templates/instance.yml | 13 +++++++------
configs/templates/semantic.yml | 4 ----
scripts/process_instance_masks.py | 13 +++++--------
src/callbacks/io.py | 11 ++++++-----
src/callbacks/processing.py | 22 +++++++++++-----------
7 files changed, 30 insertions(+), 38 deletions(-)
diff --git a/bin/tests/check_instance.sh b/bin/tests/check_instance.sh
index 99b48e5..83c0a4a 100644
--- a/bin/tests/check_instance.sh
+++ b/bin/tests/check_instance.sh
@@ -49,7 +49,7 @@ print(iou_soft)
print(iou_hard)
assert aggregated_loss < 0.9
-assert iou_soft > 0.05
+assert iou_soft > 0.04
assert iou_hard > 0.1
"""
diff --git a/configs/templates/binary.yml b/configs/templates/binary.yml
index 85324a4..bf79fbd 100644
--- a/configs/templates/binary.yml
+++ b/configs/templates/binary.yml
@@ -118,9 +118,6 @@ stages:
callback: RawMaskPostprocessingCallback
output_key: binary_mask
- raw_processor:
- callback: RawMaskPostprocessingCallback
-
iou_soft:
callback: IouCallback
input_key: mask
diff --git a/configs/templates/instance.yml b/configs/templates/instance.yml
index 18d8eca..ac6d919 100644
--- a/configs/templates/instance.yml
+++ b/configs/templates/instance.yml
@@ -10,7 +10,7 @@ args:
stages:
state_params:
- main_metric: &reduce_metric iou_hard
+ main_metric: &reduced_metric iou_hard
minimize_metric: False
data_params:
@@ -56,7 +56,7 @@ stages:
contrast_limit: 0.5
- transform: A.RandomGamma
- transform: A.CLAHE
- - transform: A.JpegCompression
+ - transform: A.ImageCompression
quality_lower: 50
- &post_transforms
transform: A.Compose
@@ -108,10 +108,11 @@ stages:
multiplier: 1.0
loss_aggregator:
- callback: CriterionAggregatorCallback
+ callback: MetricAggregationCallback
prefix: &aggregated_loss loss
- loss_aggregate_fn: "mean" # or "sum"
- multiplier: 1.0 # scale factor for the aggregated loss
+ metrics: [loss_bce, loss_dice, loss_iou]
+ mode: "mean"
+ multiplier: 1.0
raw_processor:
callback: RawMaskPostprocessingCallback
@@ -140,7 +141,7 @@ stages:
loss_key: *aggregated_loss
scheduler:
callback: SchedulerCallback
- reduce_metric: *reduce_metric
+ reduced_metric: *reduced_metric
saver:
callback: CheckpointCallback
diff --git a/configs/templates/semantic.yml b/configs/templates/semantic.yml
index efa2d45..166256f 100644
--- a/configs/templates/semantic.yml
+++ b/configs/templates/semantic.yml
@@ -118,10 +118,6 @@ stages:
callback: RawMaskPostprocessingCallback
output_key: semantic_mask
- raw_processor:
- callback: RawMaskPostprocessingCallback
- output_key: semantic_mask
-
iou_soft:
callback: IouCallback
input_key: mask
diff --git a/scripts/process_instance_masks.py b/scripts/process_instance_masks.py
index 86184b8..ecfe6b5 100644
--- a/scripts/process_instance_masks.py
+++ b/scripts/process_instance_masks.py
@@ -14,10 +14,7 @@
def build_args(parser):
parser.add_argument(
- "--in-dir",
- type=Path,
- required=True,
- help="Raw masks folder path"
+ "--in-dir", type=Path, required=True, help="Raw masks folder path"
)
parser.add_argument(
"--out-dir",
@@ -151,10 +148,10 @@ def preprocess(self, sample: Path):
else:
sz = 3
- uniq = np.unique(labels[
- max(0, y0 - sz):min(labels.shape[0], y0 + sz + 1),
- max(0, x0 - sz):min(labels.shape[1], x0 + sz + 1),
- ])
+ uniq = np.unique(
+ labels[max(0, y0 - sz):min(labels.shape[0], y0 + sz + 1),
+ max(0, x0 - sz):min(labels.shape[1], x0 + sz + 1)]
+ )
if len(uniq[uniq > 0]) > 1:
borders[y0, x0] = 255
mask_without_borders[y0, x0] = 0
diff --git a/src/callbacks/io.py b/src/callbacks/io.py
index d64ad7a..d459517 100644
--- a/src/callbacks/io.py
+++ b/src/callbacks/io.py
@@ -3,7 +3,7 @@
import imageio
import numpy as np
-from catalyst.dl import Callback, CallbackOrder, State, utils
+from catalyst.dl import Callback, CallbackNode, CallbackOrder, State, utils
from .utils import crop_by_masks, mask_to_overlay_image
@@ -17,7 +17,7 @@ def __init__(
input_key: str = "image",
outpath_key: str = "name",
):
- super().__init__(CallbackOrder.Logging)
+ super().__init__(order=CallbackOrder.Logging, node=CallbackNode.Master)
self.output_dir = Path(output_dir)
self.relative = relative
self.filename_suffix = filename_suffix
@@ -102,10 +102,11 @@ def __init__(
self.output_key = output_key
def on_batch_end(self, state: State):
- names = state.input[self.outpath_key]
- images = utils.tensor_to_ndimage(state.input[self.input_key].cpu())
- masks = state.output[self.output_key]
+ names = state.batch_in[self.outpath_key]
+ images = state.batch_in[self.input_key]
+ masks = state.batch_out[self.output_key]
+ images = utils.tensor_to_ndimage(images.detach().cpu())
for name, image, masks_ in zip(names, images, masks):
instances = crop_by_masks(image, masks_)
diff --git a/src/callbacks/processing.py b/src/callbacks/processing.py
index c385caf..f148b4f 100644
--- a/src/callbacks/processing.py
+++ b/src/callbacks/processing.py
@@ -1,6 +1,6 @@
import torch
-from catalyst.dl import Callback, CallbackOrder, State
+from catalyst.dl import Callback, CallbackNode, CallbackOrder, State
from .utils import encode_mask_with_color, label_instances
@@ -11,7 +11,7 @@ def __init__(
input_key: str = "logits",
output_key: str = "mask",
):
- super().__init__(CallbackOrder.Internal)
+ super().__init__(order=CallbackOrder.Internal, node=CallbackNode.All)
self.threshold = threshold
self.input_key = input_key
self.output_key = output_key
@@ -21,7 +21,7 @@ def on_batch_end(self, state: State):
output = torch.sigmoid(output).detach().cpu().numpy()
state.batch_out[self.output_key] = encode_mask_with_color(
- output, self.threshold
+ output, threshold=self.threshold
)
@@ -35,7 +35,7 @@ def __init__(
out_key_semantic: str = None,
out_key_border: str = None,
):
- super().__init__(CallbackOrder.Internal)
+ super().__init__(CallbackOrder.Internal, node=CallbackNode.All)
self.watershed_threshold = watershed_threshold
self.mask_threshold = mask_threshold
self.input_key = input_key
@@ -44,22 +44,22 @@ def __init__(
self.out_key_border = out_key_border
def on_batch_end(self, state: State):
- output: torch.Tensor = torch.sigmoid(state.output[self.input_key])
+ output = state.batch_out[self.input_key]
+ output = torch.sigmoid(output).detach().cpu()
semantic, border = output.chunk(2, -3)
if self.out_key_semantic is not None:
- state.output[self.out_key_semantic] = encode_mask_with_color(
- semantic.data.cpu().numpy(), threshold=self.mask_threshold
+ state.batch_out[self.out_key_semantic] = encode_mask_with_color(
+ semantic.numpy(), threshold=self.mask_threshold
)
if self.out_key_border is not None:
- state.output[self.out_key_border] = (
- border.data.cpu().squeeze(-3).numpy() >
- self.watershed_threshold
+ state.batch_out[self.out_key_border] = (
+ border.squeeze(-3).numpy() > self.watershed_threshold
)
- state.output[self.output_key] = label_instances(
+ state.batch_out[self.output_key] = label_instances(
semantic,
border,
watershed_threshold=self.watershed_threshold,
From 802ab2e7b81d093dc9cbceae83ac56476d10b5ba Mon Sep 17 00:00:00 2001
From: Yauheni Kachan <19803638+bagxi@users.noreply.github.com>
Date: Tue, 10 Mar 2020 22:33:00 +0300
Subject: [PATCH 10/12] fix scripts
---
bin/codestyle/check_codestyle.sh | 18 +++++++++---------
teamcity/binary.sh | 2 +-
teamcity/instance.sh | 2 +-
teamcity/semantic.sh | 2 +-
4 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/bin/codestyle/check_codestyle.sh b/bin/codestyle/check_codestyle.sh
index 662f7f1..f2ac455 100755
--- a/bin/codestyle/check_codestyle.sh
+++ b/bin/codestyle/check_codestyle.sh
@@ -12,19 +12,19 @@ while getopts ":s" flag; do
esac
done
-echo 'isort: `isort -rc --check-only --settings-path ./setup.cfg`'
+echo 'isort -rc --check-only --settings-path ./setup.cfg'
isort -rc --check-only --settings-path ./setup.cfg
# stop the build if there are any unexpected flake8 issues
-echo 'flake8: `bash ./bin/_flake8.sh`'
-bash ./bin/_flake8.sh --count \
+echo 'bash ./bin/codestyle/flake8.sh'
+bash ./bin/codestyle/flake8.sh --count \
--config=./setup.cfg \
--show-source \
--statistics
# exit-zero treats all errors as warnings.
-echo 'flake8 (warnings): `flake8 .`'
-flake8 ./bin/_flake8.sh --count \
+echo 'flake8'
+flake8 ./bin/codestyle/flake8.sh --count \
--config=./setup.cfg \
--max-complexity=10 \
--show-source \
@@ -33,9 +33,9 @@ flake8 ./bin/_flake8.sh --count \
# test to make sure the code is yapf compliant
if [[ -f ${skip_inplace} ]]; then
- echo 'yapf: `bash ./bin/_yapf.sh --all`'
- bash ./bin/_yapf.sh --all
+ echo 'bash ./bin/codestyle/yapf.sh --all'
+ bash ./bin/codestyle/yapf.sh --all
else
- echo 'yapf: `bash ./bin/_yapf.sh --all-in-place`'
- bash ./bin/_yapf.sh --all-in-place
+ echo 'bash ./bin/codestyle/yapf.sh --all-in-place'
+ bash ./bin/codestyle/yapf.sh --all-in-place
fi
diff --git a/teamcity/binary.sh b/teamcity/binary.sh
index bf75458..a3617a7 100644
--- a/teamcity/binary.sh
+++ b/teamcity/binary.sh
@@ -5,4 +5,4 @@ set -eo pipefail -v
pip install -r requirements/requirements.txt
-bash ./bin/tests/_check_binary.sh
+bash ./bin/tests/check_binary.sh
diff --git a/teamcity/instance.sh b/teamcity/instance.sh
index f414784..c1b2466 100644
--- a/teamcity/instance.sh
+++ b/teamcity/instance.sh
@@ -5,4 +5,4 @@ set -eo pipefail -v
pip install -r requirements/requirements.txt
-bash ./bin/tests/_check_instance.sh
+bash ./bin/tests/check_instance.sh
diff --git a/teamcity/semantic.sh b/teamcity/semantic.sh
index 794e0f2..b1bf33c 100644
--- a/teamcity/semantic.sh
+++ b/teamcity/semantic.sh
@@ -5,4 +5,4 @@ set -eo pipefail -v
pip install -r requirements/requirements.txt
-bash ./bin/tests/_check_semantic.sh
+bash ./bin/tests/check_semantic.sh
From 0e128e358f18d15971a55f697a4e4fad623ebc70 Mon Sep 17 00:00:00 2001
From: Yauheni Kachan <19803638+bagxi@users.noreply.github.com>
Date: Mon, 30 Mar 2020 18:19:49 +0300
Subject: [PATCH 11/12] update readme
---
README.md | 142 ++++++++++++++++++-----------------------
pics/wandb_metrics.png | Bin 18543 -> 0 bytes
2 files changed, 62 insertions(+), 80 deletions(-)
delete mode 100644 pics/wandb_metrics.png
diff --git a/README.md b/README.md
index 192ab39..3c1ccd7 100644
--- a/README.md
+++ b/README.md
@@ -227,35 +227,32 @@ We will initialize [Unet](https://arxiv.org/abs/1505.04597) model with a pre-tra
CUDA_VISIBLE_DEVICES=0 \
CUDNN_BENCHMARK="True" \
CUDNN_DETERMINISTIC="True" \
-WORKDIR=./logs \
-DATADIR=./data/origin \
-IMAGE_SIZE=256 \
-CONFIG_TEMPLATE=./configs/templates/binary.yml \
-NUM_WORKERS=4 \
-BATCH_SIZE=256 \
-bash ./bin/catalyst-binary-segmentation-pipeline.sh
+bash ./bin/catalyst-binary-segmentation-pipeline.sh \
+ --workdir ./logs \
+ --datadir ./data/origin \
+ --max-image-size 256 \
+ --config-template ./configs/templates/binary.yml \
+ --num-workers 4 \
+ --batch-size 256
```
#### Run in docker:
```bash
-export LOGDIR=$(pwd)/logs
docker run -it --rm --shm-size 8G --runtime=nvidia \
- -v $(pwd):/workspace/ \
- -v $LOGDIR:/logdir/ \
- -v $(pwd)/data/origin:/data \
- -e "CUDA_VISIBLE_DEVICES=0" \
- -e "USE_WANDB=1" \
- -e "LOGDIR=/logdir" \
- -e "CUDNN_BENCHMARK='True'" \
- -e "CUDNN_DETERMINISTIC='True'" \
- -e "WORKDIR=/logdir" \
- -e "DATADIR=/data" \
- -e "IMAGE_SIZE=256" \
- -e "CONFIG_TEMPLATE=./configs/templates/binary.yml" \
- -e "NUM_WORKERS=4" \
- -e "BATCH_SIZE=256" \
- catalyst-segmentation ./bin/catalyst-binary-segmentation-pipeline.sh
+ -v $(pwd):/workspace/ \
+ -v $(pwd)/logs:/logdir/ \
+ -v $(pwd)/data/origin:/data \
+ -e "CUDA_VISIBLE_DEVICES=0" \
+ -e "CUDNN_BENCHMARK='True'" \
+ -e "CUDNN_DETERMINISTIC='True'" \
+ catalyst-segmentation ./bin/catalyst-binary-segmentation-pipeline.sh \
+ --workdir /logdir \
+ --datadir /data \
+ --max-image-size 256 \
+ --config-template ./configs/templates/binary.yml \
+ --num-workers 4 \
+ --batch-size 256
```
@@ -271,35 +268,32 @@ docker run -it --rm --shm-size 8G --runtime=nvidia \
CUDA_VISIBLE_DEVICES=0 \
CUDNN_BENCHMARK="True" \
CUDNN_DETERMINISTIC="True" \
-WORKDIR=./logs \
-DATADIR=./data/origin \
-IMAGE_SIZE=256 \
-CONFIG_TEMPLATE=./configs/templates/semantic.yml \
-NUM_WORKERS=4 \
-BATCH_SIZE=256 \
-bash ./bin/catalyst-semantic-segmentation-pipeline.sh
+bash ./bin/catalyst-semantic-segmentation-pipeline.sh \
+ --workdir ./logs \
+ --datadir ./data/origin \
+ --max-image-size 256 \
+ --config-template ./configs/templates/semantic.yml \
+ --num-workers 4 \
+ --batch-size 256
```
#### Run in docker:
```bash
-export LOGDIR=$(pwd)/logs
docker run -it --rm --shm-size 8G --runtime=nvidia \
- -v $(pwd):/workspace/ \
- -v $LOGDIR:/logdir/ \
- -v $(pwd)/data/origin:/data \
- -e "CUDA_VISIBLE_DEVICES=0" \
- -e "USE_WANDB=1" \
- -e "LOGDIR=/logdir" \
- -e "CUDNN_BENCHMARK='True'" \
- -e "CUDNN_DETERMINISTIC='True'" \
- -e "WORKDIR=/logdir" \
- -e "DATADIR=/data" \
- -e "IMAGE_SIZE=256" \
- -e "CONFIG_TEMPLATE=./configs/templates/semantic.yml" \
- -e "NUM_WORKERS=4" \
- -e "BATCH_SIZE=256" \
- catalyst-segmentation ./bin/catalyst-semantic-segmentation-pipeline.sh
+ -v $(pwd):/workspace/ \
+ -v $(pwd)/logs:/logdir/ \
+ -v $(pwd)/data/origin:/data \
+ -e "CUDA_VISIBLE_DEVICES=0" \
+ -e "CUDNN_BENCHMARK='True'" \
+ -e "CUDNN_DETERMINISTIC='True'" \
+ catalyst-segmentation ./bin/catalyst-semantic-segmentation-pipeline.sh \
+ --workdir /logdir \
+ --datadir /data \
+ --max-image-size 256 \
+ --config-template ./configs/templates/semantic.yml \
+ --num-workers 4 \
+ --batch-size 256
```
@@ -315,35 +309,32 @@ docker run -it --rm --shm-size 8G --runtime=nvidia \
CUDA_VISIBLE_DEVICES=0 \
CUDNN_BENCHMARK="True" \
CUDNN_DETERMINISTIC="True" \
-WORKDIR=./logs \
-DATADIR=./data/origin \
-IMAGE_SIZE=256 \
-CONFIG_TEMPLATE=./configs/templates/instance.yml \
-NUM_WORKERS=4 \
-BATCH_SIZE=256 \
-bash ./bin/catalyst-instance-segmentation-pipeline.sh
+bash ./bin/catalyst-semantic-segmentation-pipeline.sh \
+ --workdir ./logs \
+ --datadir ./data/origin \
+ --max-image-size 256 \
+ --config-template ./configs/templates/instance.yml \
+ --num-workers 4 \
+ --batch-size 256
```
#### Run in docker:
```bash
-export LOGDIR=$(pwd)/logs
docker run -it --rm --shm-size 8G --runtime=nvidia \
- -v $(pwd):/workspace/ \
- -v $LOGDIR:/logdir/ \
- -v $(pwd)/data/origin:/data \
- -e "CUDA_VISIBLE_DEVICES=0" \
- -e "USE_WANDB=1" \
- -e "LOGDIR=/logdir" \
- -e "CUDNN_BENCHMARK='True'" \
- -e "CUDNN_DETERMINISTIC='True'" \
- -e "WORKDIR=/logdir" \
- -e "DATADIR=/data" \
- -e "IMAGE_SIZE=256" \
- -e "CONFIG_TEMPLATE=./configs/templates/instance.yml" \
- -e "NUM_WORKERS=4" \
- -e "BATCH_SIZE=256" \
- catalyst-segmentation ./bin/catalyst-instance-segmentation-pipeline.sh
+ -v $(pwd):/workspace/ \
+ -v $(pwd)/logs:/logdir/ \
+ -v $(pwd)/data/origin:/data \
+ -e "CUDA_VISIBLE_DEVICES=0" \
+ -e "CUDNN_BENCHMARK='True'" \
+ -e "CUDNN_DETERMINISTIC='True'" \
+ catalyst-segmentation ./bin/catalyst-instance-segmentation-pipeline.sh \
+ --workdir /logdir \
+ --datadir /data \
+ --max-image-size 256 \
+ --config-template ./configs/templates/instance.yml \
+ --num-workers 4 \
+ --batch-size 256
```
@@ -353,16 +344,7 @@ The pipeline is running and you don’t have to do anything else, it remains to
#### Visualizations
-You can use [W&B](https://www.wandb.com/) account for visualisation right after `pip install wandb`:
-
-```
-wandb: (1) Create a W&B account
-wandb: (2) Use an existing W&B account
-wandb: (3) Don't visualize my results
-```
-
-
-Tensorboard also can be used for visualisation:
+Tensorboard can be used for visualisation:
```bash
tensorboard --logdir=/catalyst.segmentation/logs
@@ -396,7 +378,7 @@ For your future experiments framework provides powerful configs allow to optimiz
* Common settings of stages of training and model parameters can be found in `catalyst.segmentation/configs/_common.yml`.
* `model_params`: detailed configuration of models, including:
- * model, for instance `ResnetUnet`
+ * model, for instance `ResNetUnet`
* detailed architecture description
* using pretrained model
* `stages`: you can configure training or inference in several stages with different hyperparameters. In our example:
diff --git a/pics/wandb_metrics.png b/pics/wandb_metrics.png
deleted file mode 100644
index 0dfd0a1423bc19581154277f9c9c37fbfa8cb21b..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
literal 18543
zcmeIaby(EF_b=F`7mr8ecNW)Un
zu+)7o@cG{FANM}beSW|D=Y2o8J3Bjb=FFKhuM;zCpt7PAJ}wz92n52Hk$$5J0^u}(
zKv;ZtZUHqRR&&b0+f9epGU|7L$K#I4R}hE>B=hE#x=Ygfl)Hx>H2Grta55@E#zASE
zK|z&GtoMi7&{I?`6Q
zc&d8;xwLN?(f58{i+=7lc~%88PKR!jrNb8ae(u91-tv*%?beL81g@wafrQK%*PcU<
zx>&83B#!{bE+5k7@9WtA`4EqH^Y3-pC-@DB;JVx@{r{>lqWC|RN%8Lp#&Z=-eaA|h
z3I~{56r{7X6h-LlB|Gw=;*AxbUIz2UIum5q&w58raA%#(@i2L2U@9yW`oU=9{18#<
z^k;eR^_Izrq5(S902sCtxkt^_-5u|dO>lN<>o_JE_kIKO&)<76_4oEhRLabBoa0F<
zgsOf^QWy~dEvPH93gtqYh}r!I3Ol+
zY9(@hU(9n=mSuPWx?a7@wjW1Ge`})I0o~}%s;M$HvwB|5YdP~+
zG^c*brqNai?PD|j?`IN{dZ1RUPM;?-w5CEK1m17neZUZEzEJ0#+rwI;X)qN#x~T{A
zKAwAhw$mMegsCxA%?hD~&g8|W$20X|CKMadMgQcj(Y@B_P9(IQeD`!Zt%llDnNKX5
zU#Xi1eWFCX8bN)gf$yoRG6hACmy2aNEZgi60!m5rC%#6u+;ww+=dnmZS~D%)49uPS
zD9Mt%E-TU2T`$C64VsEB6ZP2BpAz|XbduR!=v`gs&~muH(w)JXga>WI*JQ?LoVn2u
z+>4DP-i)X`hM|vhb+@{!Iffiog$2HvJf9RoM5)cS3wv+SWPXV3Xp
zXD#EihaXob3(u8PbHAex3HUq=QqPBNku9S+)WYXqQci~o_j@&mbD$>&ji!N7BSq^2
zht!2Lm5O3V8yX_6BQZ=GHO%lW+|K6URCghLDK6x+awc%x7Uuu`ks>MBBcOe(&
zeX4lYVJf-3(M)jm_vz}H!D*Ff$uRSXc$mK1K~DE^d(Zwtp;x|j*i4#(;LKpGt*4H`
zIEs4tw5oL7c5{fZfYhUNWG^uzZq>?pDosH@*P-VR`ZU@7iOUTT$mBuu!dmHOf6wYK
z_i5Tfx;BJ@L5%{lUZho%HF}%}3_qFd*&nc)bTf+FK8)_Mtx$3H-U)KETUH?jkE|w&
z)k!3xsRmMVQw2e-cL1m?R;`$QjNX*n)+R%0=4+&6)|9ZR<Tiy18`J~t
zY!zDGUMH03Mq;bITPnBAl+RH%Mun+I#z%X3@9W&tb0L&rki4@LUMY
zYHe-;eNZD1B+`ln0)>Ybf0W;Z*Xk#JqsvwDc3D9lxd_%6gw?DJov>-sRa!#V9Ghp1
z@9>?z4&!bTE37?cfavJLm_;3H;V}1?pz!SXzSuO(`#d!y)HgeFYkvJ0*3Ntfqmw{Q
zo@JvQ9}dViM`*ei!S}PthBBq<&p(PV<>uIAh@SG5TlmpkM5A(&hXEZC&edET%4SAz
z@00j|-88}Pwj`~fP3b>Fh26u>H>P38>RdJdB+iGL&h0ZVj3--2n}Jbm=XQU2<6jJJ
z;$fgTCdSxaGiKV`HG;}BN3B`zB16-C1of%8W$)egJ{s{(1{dMnVqmEPe{z;?ex{>DFCVY`?$s;!vm6)^BnpGb98zfk2{YXZzD4Y}!J
z{A8BtNDZTxUv`m1y+wWlYvTsWa?ySyA#`ew@CO(=|dq^(p1%ST=uL_ot_X=&SUKHPk}
zA781_eQSSnGW(W{eZg=(H9SU+bx6-%=ae8>XlR_g{>wI@6ztqHxS0`pSU&?3+m}^M
zAsN!3*RUzFF)wQSWBZFm3UcE0KdlIHbN<jUpkp?3Y
zGnFolJ_i+cf7tep3q`W0r0^F7@Q;P?1rQomyDb-r<@&|XMZ*b{^jdPz$&ytI6NIRl
z9+|;WbY|V&*w$KZwx~d@q>j3Cl;~MO%ih>U`1$$l$^Q8)vtek_7Ndq}t~a{=4|Tol
zh}VU_r;(qWD#B~Gr+jUujHk_xbx=SPl0UAw)%Q8D$+5=#;&WZ?fw$wt*WHsIVDv2*gFq&ER1fDAJkn-^%m?TF=u)omFdrSz@qmQ~Jn
zdx^HkNWBS@XX-Z4-IOyU8A4$URJI|f^W-vW$da{KIhQlc*cw@Slv;Ads}J(LVa5d9`ynx}4v(zk}X9HLWJdc90Ry
z$Jq;b#Ns4|@&uqK)Lac%h77;iR_DoWM-ax-6*?E&dxv|hIvxN$kV6260LTyZFFJrfg7kL?1ZPnf3bZvvc>VK|lM1ZX96
ztPRE4?6&jUy`e5pa^fiYl^w*&qVe}z;$SeBC!m>P
zzd<4H^z93@M=7-3W9Fe}@oy$c^!^mJQ_fy9ljn1>Ja?~tIIY}Ne?&L|+E}N`Cavy>
zRr8(yAD@Gv^0k8GGr@2Wjel&TT9ql2YtUO-~XX!VpFA*E*{@I!>naV6#P
zXMpykss7nMzNyY~as^3Py{7X;d*QNBMr2D?*B0vt3+#>K2;l_S&NxO?UHH^ve{H62
zOKnQ(vDH}jcG%Q=ixEbuqLmjzt~_?$joewgXq4@gNG9Nb14b)!L;26Q`oC%D(A^P`Q!!syacakaGi
z?LYcw|1wPNOON+gQ;+w@$AXXyj33?Wq{>ev6S`T;a;*#!ZM^d!&~ldHs$Ua4KPam+
z*``$UtCDi6PE}kPPErblJr_x;H_wd%s>GCYg(h_Nkt6q})FU#!?K1Cq_bBa=Q+zBh
zfB^mgv&PxJ!Qgv|qQRkugK56A?!n-ov;eA|)xV@cdj@L`CUl``2lI$dUU
zOXfj0+n^M|XHz;>YAvtSnqL@s9x{5xOev?4yZuJ7c5C`gG4#A?v2jLyQ+tQrWL4vf
zsWzb!$o5PTo>G0;HiMWstZ?jb3u|AGY~OY4nq!@EZWu&1>AkrN;7Re5C?$I~%yio^
z)8+5DSt9e&zI(`kqgpb>$k+uYlHJg&se)YJS!-dt31z<@MWn|5VJQ0%RRg?JAD!Q-ug>4F@xL8;_eCDZRP9yw
zwkMNbzm;!1E=hGJ#0Xj%Ff{iqx4KPT&ixGzQqh$`#0g=kvE1%aqsg+iftn^&{nh>q
z44^}H)$P|+Ym6ZhVrsNRYTB+JaF;QGf7_pUkJp&EB@2jKq^*yS(Zi2-%+L8FMswtN
zlly;KbKpEvT~((-z%(xoCVuixYULKHk3Rg|Sk`wEvj2EEC8NhWAH48+RH@KOn7T
zuQV;fNn~b8LoP~ze3;RHn6(;Ln96er&`niV!zs0
zBQNW+QOjq9drHj>2w>k1wJ>n`7WO!>wRA!~Vx?sS`a@R;<~E~kK7!g!wsh?-G(X*z
zpbH#Wb_Ctz-hQ
zdWHJz_qiJ+%WH+2WviR1+^b$;V813-mSmJCjP_waL`(oqnHK+PHh#}A>ioA&U$L|1
zik$g4xXeDSZg3>c-OEYZnXrKW!nZB<@FU8f{M*nrA_M&|Xq$MM4+1H`*Ow_q6ilWY
z?74uW=9RT$z-A5k-za1R)C4fPv()NBL(_xf3V(7SQdnE0J{9J?@@VF3YqVrmkVw_6
zj4yC~C8Cc)s^6
zN+MWtz*O(9rp%y`^*nTmV8Ie5Des~*{xN!m))Y*lNnDSH2&@*sk>z>Fdl=@fyNX>c
zOTur4$g?2TPMt6XLvyVr&k(Z7I}3D-t-^ygIbsx)yJcLl3!S=@?t}q~M}qf5;kmRP
zb7jgO2FqrJSrPE|B(?Jta2Bhpo^k47B#u+e5^PwR
z-wAy9@)vN0d1xO7dB<~*M&tzh9*ty^s+rI*IM8HEb7%Yd#y?Ye!Fit?{p3jv3qwLg
z{IeV1Y{^f*TD%wIDpH8dBMHjWXtNYJuQYw1NMGc6
zqWM-iemzTePN;1VzD-cbv@NUF!+jl=Nb94I^kv%lB-+r^3dN(5cM+z+#^qXb%brI?
zEw{qlkGAp85%Z}n=`CA)>>n;FziS9hwAhwtFJ<=%M7H+~6g2pZhv7db32C>9d2J@&
z!Hlg_-N|J&V_cRsjQIXBehMF6XdAxzvF#2&v>6?QtT9~ote%-J$S*u>hpa^QJ<^zU
zU4vSLxO?H<0?ALLWcOB-L-*n>w*r#}=W$-EP+C%pu_swQq9n|2-YB-TI3ARkdbJDP
zl-u245^DS5QiZI9Xbj2Uw+raP5-BgY(g_nG<}qcgQ
z;3sxkpRI7|;hVueeaiM3uA>Psv
z?@xENUD!7p|5Y^S)h;W+{U)`TRQkZ<#5UE
z+9IQg491(&OX5pBgm$CNN2I~~4hbvfW<~D{rUSwPV*5BgcYZ9#pK>~NpyfR1>9(LI
zB@25%N9dhl^vQbF<#hEKaVl&{Tt3ZC9Z>2AK2po;AwR|5U87Xe4!}LUp%O49Rwd!H2GtM(XlHhjsj6h|VU}Yh}Cl6sx@m!Ax
z5+xM_Bo$Bo27rNg)@sRV3WT11P!hq~&j}WLfjs-*qj8J!ds97TC9O8VpTenCkT>nF_WPxNT-37Lw4=X=oK6fC{!A_GeEC>$%vu9dT_%o*wa=mknS3_nhXcFL4f_TP6?D&U5)v3L
zMCH^X);wpgrT(`lx4}57!y;ycjL>4^kHmNyh_6m$jO9@tnRe>iFP+ae1izIrg8Q#VxA-3GZ|NBmwT_jd3F%nN~`Y23*z8A$`tffcDuMD
zWs4W9Ba8a`Ws=aPn9lQCoMZpSf_&}N1N?D{Y0U+ChVn+amUE>3wRKX&HWP+&i8R~@
z*I7d2#~&so7RK;w*Es
zSzyoY@7S
z*5K7TE%M*-#xmr!)=C&JsSA)h26+f)2uaA|@~-B#kd>~Wte`5Gy&>neHCAwLiv`
zXm*F{PUog+EhxKtM2KdlDCg5#bp^`p8FX#ews>_BZMv87T*pI)WD;-;#RZq9w#k5B
zM_9lw(_`M41On&Gsr!kwqL!s~)t4Sguoyq|sL
z?L4Y*c6m<8YaEDkBjC`Js;|s=QiCd^JfJg*8gGgJW&1~+nEBG@Ek@vi0j9XAXK_>6
z9*2{m$&`u+MK;HrUSqYomXtf?kEr#lNfzvz_&|pCFQ|QlY9#HYba{v*68%FZA#kTK
z#nP_LTa+5P8jqW89f&j#Ki6JtY-**23?$v!bPDr+A?mYAo)kjK{Z68!?6z*8{9u%m
zUD>{WOr`y@7FhqUvi7?oq#n#vM|ovyWf&jnTkV6Eg8WO@KuCmtz}
z+!CWP?z0EG(FIhi+sso!NxzkgaJUVcR;UZT)e;4Nk!-ycwf{ZE(stb?M`UTAR3e+i
zX1OidSV8aYR{X)cApQs5Js{A)NXC?|lXRSZAu}^2%62;2K(UzJ^Df>1tpxJ$&7PBk
zv@Hc{WP;@><*Kmgc^8!7)1uKU`?QvVq~DLq!-z$fLvhpd+v$C7fef2f#Xy=78n&XE
z`}<0(fwiopScu^dqO_vDyzZsWCwQm|yTAh7%~;&-UhtY==|y
zfy+RW9d=XV+w)}I;HMKvrv$f>7nyv7hgM2z(sjn$Jf_`xkr_cfdzO+41>HF&UBp0S
z7DG=G)OVjm`swHVSGOKJQq+adzu`h6I+AuOnm2uy+lt`XlRTTAExN)Vl@$`yQYg@C
zSfKFy&LwN~YLp`6BqC_uYAs%Bf6v=ys9}lFa^T58Ro6kXn}^)z4|ugGA+n)OcK+iece9mC)nfD4WfK9&sj0G8|
z6;XCFzsr52jV#9y>
z18QV5ySJAv`rEqzR{kjjn5R-lC7GE`Qyd!<{@_l7ZxnaeoS|g;e)6)dxORP03Vo
zAfjZzg(E#ifA5{02R_a|4AzOC761l`IQD3HCI~o~MN_@!!{Ofr?;)XKt|7njUUZeO
zeOjc-&jaN8U60sh1xyGMXGp)z&;HvmF*1}&Z9^pDjgpN+4&R~lXReC4*y@GuA_631
znk_E~)JkV|H~}y8mcnde9|hzZn<~Utg3}ZI=d3o@tH@1l)Nw&0A5hA#OtCcVF^
z6$rVq0k+{qgzTG}1TvQE?2b8I%4$+tTNFHwf8QT{38VwOs-fuW($db{EIdd%wEH~%
zyT&IVqeX~qXOb21^ziS?&7$$>=$Ahk$0J9T-mConK?*GH6NA>^RYP5^fbrm?`VAw>
zO?FcP3h%pBrIWc?i>yP@Ldeq2ETFW=udVkG@QJAaYI#w2E2X;U^i^Wy2yFlwCcy`r
zU%z{7lhB4x8tvb_Wm2xa@Kf7_`t&F2XqmBc%BbY|!;sa6zBhlTs5DM~k}y;%1}W-B
zXP`J?mhBKYE0l8>2#0q+fha7t3=`9u5B^03o&BIVQ$?jZ)M8
zQ2G-f$vL0x9u^noH59-f!W1D3{keD$I1?0Zf@KWw6HamSue8wZiF>uK6$b8PN_t4h
zvjt3A@xTSflh;fj(IqNzdt%iOl`|6Jm4%mT<}gq#7;9_jU34+GMz~ZI#dZqw9{Wwe`~qHIW%gMlS_Nv
z2fQIWgpE>BsxTnzz_3=uP38D_c`Q<$17HM21CnZg`VYesuhn+qI5fwgkQ&!=B+bjO
zbDnKXmShyHwlo;|rWdk2{VMN2cLB8L)D=i;OM#uyECCA5q`m6Dh9|j?{`JF&&%bn6
zTp7ThCfdgp)`C_R0QPjMfOFM&Oj#G@GdzKui8wDUsHJ9NfT%z1;9pKdbrqm0ri$3M
zm?f-%gq*p}8+ybZ`eyjkwON#}1k(!$TIK4R4=upICrkltxZZ+71gTH
z4}kU4P+x=>0CE6V?ks3UY0(4C*qzBH@N2y=Q@UrlH5(o*eQG+|Z*MOAPyah2W~fr8
z0MH|v-TOYq8UWZk=1ldbG`sxu%$a&p71BTs5Ex4rV*fEhaC0A>b|k-*^4BNRC9xjZ
z!i4}Eu-
zL3JW}Xz)Sm9l#qaEf&;7#4cHyB6a0tDXuuFzV9=i89Y)yu~AIWwO)W|AL_S96Jri(
zN}#uI?e$M!)uI)Z0fQ7LaNpYkTs@jK1gk*88R~p6vyBV@QOx6hN#Ay2Fy7l@)ixaY
z2B#p&!{mlF#%9QdQ>X8ulum9)1Zge_vglM#B6bxW*7CA!76n-@hX;MW)4i@%O
z9WTdAbE}sZ?R*0mo18QvUSDjqP;7&6CFblE^!8kBO$Xk(VEu5$u9ZmX3D8E`=;q&W
zqOC&o)*b2mBf7*rfwVQpYFoMiN*ow39-Q1IvP_8BmE9ccN8_fcG3l6&`N@zZ**qitqg<${>{yb)shHH
z`+2U~IM-5|I0OwK<9>WB7ZUi)5Q6dqd%W*{G?A_C&h+jsGJ9CU3fL_m6ThFN18cWG
zyP0|`dyiAAE3r?H)6G~9RKhBeW(iWu$xDj%F+jWc7uf$^e*D?h8>{J7YHT3>%TAph
zZyg9*GrE?SOD^SH++O02-mASB5#58D7D+6#?XJ`e2)0c9#s{D3vidQgRM^j@NdHO){T_m{x{U3zfa28
z5JE)ZsmylWG+!QbW)rYqKs32y<~hZqxne?MVq;lAg<x2_^&obA1j{~=*y_K
zVONXVYZnc_-0htM79I2?JNHj7PKWj$C1w%BgR?6!9Q~^-NF!lP?CBUm$GGa~Y|1-X
z0ByzF1%|1?2G8N&9yWIXpT~Sa!hq?5qJeIlA!qAP!0~yHkfz^;MyyI}OZofI?$f*!2a--1h)91jIG6&PCre(g`#YUoswy
zo4aa1Hn7g;hm~gxNdbossoNnT;~Knu`AtaP`mew4^!j=0t3L4
zfIm0FlE;h5xk*PS1s4pE
z0*z$=T859HXk*0)c()a}7R4~+DPDbotFB8$ol_NL2_8L+C2$HWvi%`bSXQ}UGnNKR
z^?Iu$ybT}MbG7fWZ8EO1O*9A?$M>{%crkk^UO)~_C*Eu?j9vm~W#I~{q9DE#LE=&Z
zydp|PmY@EMc;7P)Hw4lPrp3+HCV6g5PU$VDs;(N*ozoUaCm^c?MtULmsuaLgFy~xj
z_EE8^hx9TNhX-rkDQm-30}iD{-2(G1H+)u$wer3-rDG;+<~Na?xc;y>bgb53fB%GN
zyMidCze>1zh}SUcckPZJGxQi(5AH(Z)SRZ&I}Je=z5tG1&d1p$Xd$%B^8LedixsWG
zEEET}bpS$I5sE+YE>1z!@s_KSVyM;lp~#>Oictx=7AzYlx?krvzy|aONyDi&rio
zSr+Y8W$#_yGy8O?Yne^lR^FqL#f0-5Hz$gN1UStpu<)&dKpcknp#yV2gr{|>D`ZLh
z>b?baRT@EK%Ww|WUHAk`c7RtS)b;XB{R-C%rRq__m8w4!y80d_Yx1A@77Pj2vT|&Q
zJn%fO*9AkC1|Jvba&NR?7Cy4z#OGj-_&wwuBxQ0=hxnj2=kpZ8@u2mlaBrq&SXF7M
zNkEEMhdO@}LJdgGFYX%b-~I0C50|y@#t6ase;23>G_LneZ?NUe|E909snk`C7}nco
z9TM9OPI=!7E`=HJY)$7Z?oK)hVKls6kVyu;u*B(Vf2ST`D!i_H@L6c--p-RxEK);3i8{Ig51692msum@3)P
z0_U0%AT
zT%A@pS)nD@kzfi;&8L%y)M3ph^L4T)AQ}iHo5=y@Y-;wL=(|l|WM*u)OBUWP3zyeR
zP<)`4d~v3f{!LJnY#h|J{~1V$(b;;n7qEUdQ+n_!IIvqWo2oPZk1k!$Kd9?7AdF5n
zF6@ZnI;ax9!KD>vp3PPu_#HbkQ;o!Z(TVF74hAWD6OBnkN)Vd+)o#e+8V?}Z5VvlR
z*#ToJ%K3}aHaw*Xk(plJ9*~qQiM5@Y{3Y>sGwGHr%&381q31^6NoznU|D^!ER+5q9HzN0VbI*7)Tb!wM4P2tH_@K_SEOY8yz7DXJNZHzRKzb
zPciBaQdP9ltg$_gj9l2K0S>Xncfv=z8z2DFfM3ND_t?rpfJ+u53w4Ll$Ao~5cv1*R
z*n}4Qp9Q+BLj+^HfshYy8fvaYZ+MSIR3cJc29F5nllKNBir=%28uGT;wK~Hs6s;&=
zA6o>dfu6JguK1jAqJ2`VNgPH6=x9JBv~%
z)EiiBtB5(pzA0V6WuEF~`@N=la3w6a;Z5qfmf$GnBHk9&MV6<9toT)0{vV0yS$>#F=GN}
z;=pUTw%bXDAPL2f3ykr(?ih<1R!3DHbDKAO@5E5>CFw&yE2L|d@hfb#NS#Rki%bQpJ807>+a#<57f-Je3<04Ko|${=3n3i
z#Mx?O;DOaifBES?#$bVb?)9&S-Tq5_DF>hbbY5z__fOLAvf1@aUJ;zggX8+nF@Dd-
zYie&xW|U1iF3HSY_rwIKOmdex=ED!%@QBAPseZM;kIyLrBnZ7-4O+|$Ovv>>C7(#117&IHGf>TmkJBM>PMNT1$OJ?k@w=|v(XQs^
z_Y*yixr?LZ~@Eg;_p2>RxAM9#P!S#pznSMLSTF}$*K^9)`%1N+*NW%sR&UU;1gtRjDe53n*1I26=j0>fprg_cqa
zFCi)k%mLm`bxPje@13abF#gnvguGe`Y{I;@l`iKty;BKhY|opI?yi$RnUcdvI1=va
z>XLD~tDP-XUA`X9TC1Wd7^&Ns2e$}Y3r@UCb}OC*M$7oWp;q=pCv1rg?IYNdpu3~
zaPw^m+inVsLq=G|jJ)0*_~3OI8MgewEbrP9r?VK@e9Xr^(I5P^2^MtEyD8ik`)?92
zZY6>7WGTNhR#EM7(q=;W7NGP#gK;}p6}WI{m2)B4xcrd1Akq2<<+ww~e70~pwT%RqkOjXy&{)Hm>ek#@pq28I?3U0KOXObS(Qjz*7^j(f#df
zn2JjEki$J0ny_8T%HOwyR631Y--Y$>)}CqA)2;3?ZI#wxfQ3^{=V}i|=KRp<2td-W
zA1Y18{=4z{aLq$T%6kZgj4Mo4(fytK{veTw`R^j2n;^yi+{#jg)Uc%KyUHqwA{FZ0
zn|R?YvvOU(9&Pr`)Tk+mOyo?TYt*V=L>}B;D*Fw99DP%F@3+!Bb@USD7$r{K?5fIDvafp6skv*SNr&qZ#^7t62n$)$y1XT1Vz
zi`9RBvEwh1wSoV7f>~w&D>|<+X8*sa=~y0+w|~T(U|q}d`E`FxR9qE2hA}8lsITSd
zIBE`$ytMQ2pzErE3F@JuzdlS?VIaHcb#K_1t@Oi7TgU$Nzp4P=BrcjiQV(M)R7w9s
z00>@XfJDGmSeqMsJxRc-2tw1C$4r5?G)>nN6$o-n+GE{snr*Q(RN)Tc^Eb
zV{j}A(xCpoIGYcq{LJE3c@Bm0?<}yp4-t2c2*ep=4m6KH2FQqy1E_Q3A3gomVlMO>
z-nu%F7PO5j77F7;vHB6L-~NAakT+-GTz{5?
z3QK`Av*t(Yo{e7&BJ2z6rGsF{yFVw?E{HHbY1{pTg@c9(@4>YaJFpoySqvQjNiwS7
zK1(more#~b8TD4mg&UMp<+_pSWIt2khlNTg
z3=bfSuFK;faKhzr&^M(yuFO
zf7($T7I|TRkhm3;uJxIly7)HIAN&~Se78wrwQKf_!1Sh%By?jyA?AbOz;bqvxVa|uwOaG;$(NriQL$KoQ!FIFU=mBz6D
zB7e->@+yL1$G`5aW9+U)d^I@tH5O}M+M)Q^pO;Xa?Ry2B?Uvj0Bn?*v@PF1FBt~Y+
zUPe2HWY<&A1MDwFfTW+kZm}GOd9q^6J#yfAqNGqM7nf`*L-wV;?V{
zPTos8mu1c?*5cKH=QB(@o9k3jk1%%Y@4`Oy
z@Gwjl>^^YYF_n(+?t#av<
zO^BO`Zar&_Q>{jd+qYOPXN!(q4wgb@wO_yeifhiQzRR)=@sMj;G+E#|{>v51@oZa#
zZ;wWLW&(qVTYmo?Uvs#T4V*8h0Cd*g81L|L+RJy}!`$PSmk1?+IEI{CB9&OSYBFkV
ztw+2^>VI%eJLmHbEd-AX_3ngvyjmT~pO{1Zl%lmq=>GcE#?%hBfY!~W#-avsUeylv
zvzdM@d<_sYd`H`|(4!#CU@LmeaU6Q@$<@jjlD$T)gv-jwsh7at@F2STzAsY`U)XMK
z)+fH);t%zH@6O0zF<$ET&R6A|W0o4#8;b64M3&%Q0groip`;1ollG9p%|N`@=JI$mHM3?&BoAv!kvU#1_orm
z>xifAn>1p){6aSEcPeE|1b7@m25@WFx2IT_yS6nh$yhIwFoD^e;&<)+p?eLQWKIRO
zhBD{&$G?5o7+;=7GSH8Gt|j{_oN0$jy4COJO(m2zMNQaRcTQc(GnpDmD|7I(F6cJW
z!yT;RPI%OR!T?SX(&QQ=BI1Jx^CWpKu**dn~V@pio(RM=eX$LP0@h*6#gg{k;)+
zd3h!hauv_B*X41a1RCFE;07=+?#7%it(um3g2z56Mo7m@$o-=J4auTyL!1do}QhyjlM*=JIeK(B-6P?^e
zfXW7s`3=R*N@nFGm(=t=QGuuR_7ODH)hf4U*GjJ}o`DjKnItbEhIazsAjmGP_sY$AQZRAUp5Io>g@fM#8~9jB)x)
zr3CSv%X}OC0*~2c1E&K9FU~74p?I+~&uXib)i0YR?6e&sg-<|GS$lKRSL?&z2K`@x
zu>){eW5Z3gac|wQVZ*6l%WK#1`zCbkq8KPMql8*mhi<0T$N0-H!6_JFC`)wWD!}qn
zDy7z~!x#5m8UQ*Zl}Q;UoLN;>WaV+#e_YpYx@4*#nCDR^Pra9fxcXsz?L(GxeBFTf
zL5EVADlC%8lcE2Sa55;)5_#^|LKH&PJ3ZjF*l&v7$IDN*x@_DLqFA?8YB1&~kc`3f
zg6&a|Uhy*))a2OQ0p{q<$2CY16HThQtl?rpvdbra`obSbq!D2LUjST^>#gyhFTHG%
zFz)E(L6*%0*VdtW;{BS?FTRz}FmeitWYzU3Mk|0kcQLA*o5tQNdF=*zkxW!g3tzg&yRUon)TutuXmUw2cP2XgV9zO)W)*fD42~FD>HR`glWP?f
zqir)%($;owz};ydo$R*j*2BQq8H#lj+1=4RD-O{&5UMlNJHegubD7v
zlQJWTB75S$D>LvhF`lv{9j0qirz%$vJ
zu>JMC;)enWX4wUlm3_#zb{fBwv(bA1yPY31%%yfkCRl}O7$>n5eR>)I%upj4-ba}q
ztAk^QVuEQUvpolI-Uv@<`v5_eARQ&5o;_oJBp|^^EaLWdc>o{S8JRy?+k`_|q0Y?J
z9}%&)6sJ$WI2@rJm
zuy|^IR@n5)*n&xlZncxRiD_(l3T4tXpeRJdI)gQDd6+d9-4EKP+%63G+;|0ySd&2Y
zc%c(`4G>zc!z}kO)SG`VL{9$Qg9UyIk8#gKBT(xab&npLY}mjjv+8G}S`Ac{49wPhA~T4^f+SXNK=
z$-~}+iIYpM=I|?lp3U%qbw{B$nVbg0#|0wdf6B%P5_*%x-XlF_oBh87XoTX1hH#GaH2E7U27Gm~fQ@)7M1%ka~?(B=u
zACLz_#M}IWV8m9yX(ID~kn_O9AtJ$uqRE|**{u?AAGHg@KO~FvX32%Nw$l|+opHI%
zzEzQKj8+%NqrNXd+SDIu&4xlg7AiHC3UK;hqt5^Hjgq6y(0m6ru-W!-D1}`VSASD;
z(Jph7d{8!)Uby+ARFZ5+wKQOiPMV?wC(Z1t_*4gB#v`IG$=1x0^pgWfa=KDRQ;>PqvdM@z+qmX?<7
z8IkF-pY?MbRzDKK$ZA}s%yS?hvX0+ZyUD_!&;
z1_!HqetEH_=c;R|lbPVB#hm^Qk}ZeSea_!Zi}ZJkrNI1d{k%Phtk;b@
zJ8?w6R+Q6CL~h0c%~+ZD)*av~A>OY8zm(eV4(PaJG;)&Q
zzge4)mzhh9Lm623@Q}EjG)BgY}
C8OVPC
From 9b54d3304684a5e77a5bc41f7389ff7253e435cc Mon Sep 17 00:00:00 2001
From: Yauheni Kachan <19803638+bagxi@users.noreply.github.com>
Date: Tue, 7 Apr 2020 18:42:06 +0300
Subject: [PATCH 12/12] fix codestyle
---
scripts/process_instance_masks.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/scripts/process_instance_masks.py b/scripts/process_instance_masks.py
index b063987..c64c7b6 100644
--- a/scripts/process_instance_masks.py
+++ b/scripts/process_instance_masks.py
@@ -56,7 +56,9 @@ def mim_interaction(mim: List[np.ndarray], threshold: float = 0) -> np.ndarray:
return result
-def mim_color_encode(mim: List[np.ndarray], threshold: float = 0) -> np.ndarray:
+def mim_color_encode(
+ mim: List[np.ndarray], threshold: float = 0
+) -> np.ndarray:
result = np.zeros_like(mim[0], dtype=np.uint8)
for index, im in enumerate(mim, start=1):
result[im > threshold] = index