Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ pip install -r requirements.txt
Once you have NHL recap screenshots saved on your computer, you must first resize them before creating the labels. To do so, use the following script by specifying the path of the folder where your images are saved:

```
python -m src.utils.resize_images --path your_path
python -m src.data_creation.resize_images --path your_path
```

It should save all the `*.png` files inside that folder in the correct dimensions.
Expand Down Expand Up @@ -63,7 +63,7 @@ Once you launched the labeling tool, you are up and running to import you resize
The last part before splitting the dataset and running the model is to extract the XML from the labeling tool and split it into separate XMLs (one for each file). To split the XML downloaded from the labeling tool:

```
python -m src.parser.xml_splitter --file path_xml_file --dir dir_save_xmls
python -m src.data_creation.parser.xml_splitter --file path_xml_file --dir dir_save_xmls
```

The very last step is to add the resized images and the accompanying XML to the `data/raw/` directory and push it to the repo.
Expand Down
8 changes: 0 additions & 8 deletions data/raw/image_train.txt

This file was deleted.

4 changes: 0 additions & 4 deletions data/raw/image_val.txt

This file was deleted.

8 changes: 0 additions & 8 deletions data/raw/xml_train.txt

This file was deleted.

4 changes: 0 additions & 4 deletions data/raw/xml_val.txt

This file was deleted.

2 changes: 1 addition & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
numpy
torch
torch==1.4.0
elementpath
optparse-pretty
python-resize-image
Expand Down
File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -9,4 +9,4 @@ def readfile(name):

def savefile(classe, name):
    """Pickle the object *classe* to '<name>.pkl'.

    :param classe: any picklable object (typically a label array or class instance)
    :param name: output path without the '.pkl' extension
    """
    with open('{}.pkl'.format(name), 'wb') as fich:
        # pickle.dump streams directly to the file object instead of first
        # building the entire byte string in memory with pickle.dumps.
        pickle.dump(classe, fich, pickle.HIGHEST_PROTOCOL)
Original file line number Diff line number Diff line change
@@ -1,10 +1,11 @@
import numpy as np
import matplotlib.pyplot as plt
import mahotas
from src.semantic.parser.xml_parser import parse_xml_data
from src.semantic.net_parameters import p_label_to_int
from src.data_creation.parser.xml_parser import parse_xml_data
from PIL import Image

LABEL_TO_INT = {'ice': 1, 'board': 2, 'circlezone': 3, 'circlemid': 4, 'goal': 5, 'blue': 6, 'red': 7, 'fo': 8}


class CreateLabel:
def __init__(self, path_xml, path_image):
Expand Down Expand Up @@ -42,8 +43,8 @@ def get_label(self):
poly = points[i]
x, y = zip(*CreateLabel.render(poly))
for k in range(len(y)):
if p_label_to_int[labels[i]] > frame_image[x[k]][y[k]]:
frame_image[x[k]][y[k]] = p_label_to_int[labels[i]]
if LABEL_TO_INT[labels[i]] > frame_image[x[k]][y[k]]:
frame_image[x[k]][y[k]] = LABEL_TO_INT[labels[i]]
self.frame_image = frame_image.transpose()
return frame_image.transpose()

Expand All @@ -54,7 +55,3 @@ def show_plot(self):
plt.imshow(self.frame_image)
plt.show()


#Label2 = CreateLabel(path_xml='./data/xml/test2_polygon.xml', path_image='./data/image/test2_polygon.png')
#label2_array = Label2.get_label()
#Label2.show_plot()
Empty file.
Original file line number Diff line number Diff line change
Expand Up @@ -28,14 +28,14 @@ def split_xml(path_file, path_to):


def get_args():
    """Read the command-line options for the XML splitter.

    Returns the parsed options object exposing `.file` (path of the XML to
    split) and `.dir` (directory where the per-image XMLs are saved).
    """
    opt_parser = OptionParser()
    opt_parser.add_option('-f', '--file', type=str, dest='file',
                          help='File Path (including filename) of the XML.')
    opt_parser.add_option('-d', '--dir', type=str, dest='dir',
                          help='Directory to save the XMLs')
    options, _ = opt_parser.parse_args()
    return options


if __name__ == '__main__':
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@
from PIL import Image
from resizeimage import resizeimage

RESIZE_FORMAT = [512, 256]


def resize_images(path):
"""Function that resize png files inside a dir"""
Expand All @@ -12,7 +14,7 @@ def resize_images(path):
if file.endswith(".png"):
with open(os.path.join(path, file), 'r+b') as f:
with Image.open(f) as image:
cover = resizeimage.resize_thumbnail(image, [512, 256])
cover = resizeimage.resize_thumbnail(image, RESIZE_FORMAT)
new_name = os.path.join('resized_'+file)
cover.save(os.path.join(path, new_name), image.format)
print(file+' has been resized and saved.')
Expand Down
2 changes: 0 additions & 2 deletions src/semantic/__init__.py
Original file line number Diff line number Diff line change
@@ -1,2 +0,0 @@
from src.semantic.training_function import train
import src.semantic.net_parameters
38 changes: 38 additions & 0 deletions src/semantic/create_data_training_setup.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
import json
from pathlib import Path
from optparse import OptionParser

from src.semantic.modeling_data_creation.split_modeling_data import create_labels_from_dir
from src.semantic.dataloader.flip_images import flip_images


def get_args():
    """Parse command-line options for the data-creation entry point.

    Returns the parsed options object exposing `.config`, the path of the
    JSON training-config file (defaults to the in-repo config).
    """
    option_parser = OptionParser()
    option_parser.add_option(
        '-c', '--config', type=str, dest='config',
        default='src/semantic/training_config.json',
        help='Config file to setup training')
    options, _ = option_parser.parse_args()
    return options


def data_creation(config_file):
    """Build the train/valid/test label datasets described by *config_file*.

    :param config_file: path to a JSON file whose "data_parameters" section
        holds the raw-data path, output folder, split percentages, image cap
        and the data-augmentation flag.
    """
    with open(config_file, "r") as config_fh:
        params = json.load(config_fh)["data_parameters"]

    # Split the raw data into train/valid/test folders, converting the XML
    # annotations into saved label arrays along the way.
    create_labels_from_dir(
        path_data=params["raw_data_path"],
        path_to=params["data_creation_folder_path"],
        train_test_perc=params["train_test_perc"],
        train_valid_perc=params["train_valid_perc"],
        max=params["max_image"]
    )

    # Optional augmentation: horizontally flip the training images/labels.
    if params["data_augmentation"]:
        flip_images(Path(params["data_creation_folder_path"], "train"))


if __name__ == "__main__":
    # Script entry point: read the --config option, then build the datasets.
    args = get_args()
    data_creation(args.config)
6 changes: 1 addition & 5 deletions src/semantic/dataloader/dataset.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,9 @@
import numpy as np
import os

from PIL import Image
from torch.utils.data import Dataset

from src.semantic.utils.create_image_label import CreateLabel
from src.semantic.utils.utils import readfile
from src.data_creation.file_manager import readfile


def load_image(file):
Expand All @@ -16,8 +14,6 @@ class DataGenerator(Dataset):
def __init__(self, imagepath, labelpath, transform):
# make sure label match with image
self.transform = transform
#assert os.path.exists(imagepath), "{} not exists !".format(imagepath)
#assert os.path.exists(labelpath), "{} not exists !".format(labelpath)
self.image = imagepath
self.label = labelpath

Expand Down
8 changes: 4 additions & 4 deletions src/semantic/dataloader/flip_images.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,16 @@
import glob
from pathlib import Path
import numpy as np

from PIL import Image
from torchvision import transforms
from src.semantic.utils.utils import readfile, savefile
from src.data_creation.file_manager import readfile, savefile


def flip_images(path_data):

images = glob.glob(path_data + '*.png')
labels = glob.glob(path_data + '*.pkl')
images = glob.glob(str(Path(path_data, '*.png')))
labels = glob.glob(str(Path(path_data, '*.pkl')))

preprocess_flip = transforms.Compose([
transforms.RandomHorizontalFlip(1)
Expand All @@ -22,7 +23,6 @@ def flip_images(path_data):
image_flip = preprocess_flip(image_flip)
image_flip.save(image.replace('image', 'rimage'))


for label in labels:
label_flip = readfile(label.replace('.pkl', ''))
label_flip = Image.fromarray(label_flip)
Expand Down
60 changes: 0 additions & 60 deletions src/semantic/history.py

This file was deleted.

Empty file added src/semantic/model/__init__.py
Empty file.
Empty file.
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import torch.nn.functional as F
from src.semantic.unet.unet_utils import *
from src.semantic.model.unet.unet_utils import *


class UNet(nn.Module):
def __init__(self, n_channels, n_classes):
Expand Down
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.semantic.net_parameters import p_bilinear


class double_conv(nn.Module):
Expand Down Expand Up @@ -47,7 +46,7 @@ def forward(self, x):


class up(nn.Module):
def __init__(self, in_ch, out_ch, bilinear=p_bilinear):
def __init__(self, in_ch, out_ch, bilinear=True):
super(up, self).__init__()

# would be a nice idea if the upsampling could be learned too,
Expand Down
Empty file.
File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,8 @@
import torch
import torch.nn as nn

from src.semantic.net_parameters import p_number_of_classes

NUMBER_OF_CLASSES = 9


model_urls = {
Expand Down Expand Up @@ -56,5 +57,5 @@
# Copy the remaining batch-norm statistics of the renumbered layer, then
# initialize the new classification head for our label set.
adapt_state_dict["features.19.bias"] = ori_state_dict["features.21.bias"]
adapt_state_dict["features.19.running_mean"] = ori_state_dict["features.21.running_mean"]
adapt_state_dict["features.19.running_var"] = ori_state_dict["features.21.running_var"]
# nn.init.normal_ is the non-deprecated spelling of nn.init.normal; it fills
# in place and returns the tensor, so the assignment is unchanged.
adapt_state_dict["conv_out.weight"] = nn.init.normal_(torch.zeros((NUMBER_OF_CLASSES, 256, 1, 1)), 0)
# BUG FIX: this line still referenced p_number_of_classes, whose import was
# removed in favor of the NUMBER_OF_CLASSES constant -> NameError at import.
adapt_state_dict["conv_out.bias"] = torch.zeros(NUMBER_OF_CLASSES)
Empty file.
Original file line number Diff line number Diff line change
@@ -1,11 +1,12 @@
import glob
import math
import os
from pathlib import Path
import numpy as np

from shutil import copyfile
from src.semantic.utils.create_image_label import CreateLabel
from src.semantic.utils.utils import savefile
from src.data_creation.label_creation.create_image_label import CreateLabel
from src.data_creation.file_manager import savefile


def create_labels_from_dir(path_data, path_to, train_test_perc=0.8, train_valid_perc=0.8, shuffle=True, max=None):
Expand All @@ -21,8 +22,8 @@ def create_labels_from_dir(path_data, path_to, train_test_perc=0.8, train_valid_
The XML files are created using cvat tool (see labeling-tool/)
"""

images = glob.glob(path_data + '*.png')
xml = glob.glob(path_data + '*.xml')
images = glob.glob(str(Path(path_data, '*.png')))
xml = glob.glob(str(Path(path_data, '*.xml')))

images.sort()
xml.sort()
Expand All @@ -41,21 +42,19 @@ def create_labels_from_dir(path_data, path_to, train_test_perc=0.8, train_valid_
train_idx, test_idx = indices[:split], indices[split:]

nb_images_train = len(train_idx)
indices_train = np.arange(nb_images_train)

if shuffle:
np.random.shuffle(indices_train)

split_train = math.floor(train_valid_perc * nb_images_train)
train_idx, valid_idx = indices_train[:split_train], indices_train[split_train:]
train_idx, valid_idx = train_idx[:split_train], train_idx[split_train:]

if max is not None:
train_idx = train_idx[:max]
valid_idx = valid_idx[:max]
test_idx = test_idx[:max]

# Create new folders for train and test datasets
os.mkdir(path_to + 'train/')
os.mkdir(path_to + 'valid/')
os.mkdir(path_to + 'test/')
os.mkdir(str(Path(path_to, 'train')))
os.mkdir(str(Path(path_to, 'valid')))
os.mkdir(str(Path(path_to, 'test')))

for id in train_idx:
filename_png = images[id].split('/')[-1]
Expand Down
Loading