forked from Moodstocks/gtsrb
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathpreprocess.lua
More file actions
98 lines (77 loc) · 3.23 KB
/
preprocess.lua
File metadata and controls
98 lines (77 loc) · 3.23 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
----------------------------------------------------------------------
-- This script contains the pre-processing steps needed to accelerate
-- the learning phase.
--
-- Prior to using this script, we need to generate the datasets with
-- createDataSet.lua, and load them with dataset.lua.
--
-- The Y channel of each image is preprocessed with global and local
-- contrast normalization
--
-- These preprocessing steps are based on the work of Yann LeCun et al.:
-- http://computer-vision-tjpn.googlecode.com/svn/trunk/documentation/
-- reference_papers/2-sermanet-ijcnn-11-mscnn.pdf
--
-- Hugo Duthil
----------------------------------------------------------------------
require 'torch'  -- torch
require 'image'  -- for image transforms
require 'nn'     -- JoinTable / Reshape (previously loaded only transitively via dp)
require 'xlua'   -- xlua.progress (previously loaded only transitively)
require 'dp'     -- provides all sorts of preprocessing modules (LeCun LCN)
local script_dir = paths.dirname(paths.thisfile()).."/"
global_contrast_norm = not params.no_global_contrast_norm
local_contrast_norm = not params.no_local_contrast_norm
if train_set then
-- Global normalization and local contrast
-- normalization of Y channel
-- utility tables
local train_tensors_list = {}
local test_tensors_list = {}
-- define the normalization neighborhood:
local neighborhood = image.gaussian1D(7)
-- define the normalization operator
normalization = dp.LeCunLCN({progress = true, kernel = neighborhood, channels = {1}})
print("\nNormalization of training set")
-- per-example Y channel mean substraction
-- normalize for each exemple
for i = 1,train_set:size() do
local img = train_set[i][1][{{1}, {}, {}}]
if global_contrast_norm then
xlua.progress(i, train_set:size())
local mean = img:mean()
--mean substraction
img:add(-mean)
local std = img:std()
-- std division
img:div(std)
end
train_tensors_list[i] = torch.Tensor(img)
end
-- we need to reshape the input to feed the normalization operator
local train_set_tensor = nn.JoinTable(1):forward(train_tensors_list)
train_set_tensor = nn.Reshape(train_set:size(), 1, 32, 32):forward(train_set_tensor)
-- local contrast normalization of Y channel:
if local_contrast_norm then
normalization:apply(dp.ImageView("bchw", train_set_tensor))
end
print("Normalization of test set")
-- per-example mean substraction
for i = 1,test_set:size() do
local img = test_set[i][1][{{1}, {}, {}}]
if global_contrast_norm then
xlua.progress(i, test_set:size())
local mean = img:mean()
img:add(-mean)
local std = img:std()
img:div(std)
end
test_tensors_list[i] = torch.Tensor(img)
end
local test_set_tensor = nn.JoinTable(1):forward(test_tensors_list)
test_set_tensor = nn.Reshape(test_set:size(), 1, 32, 32):forward(test_set_tensor)
-- local contrast normalization of Y channel:
if local_contrast_norm then
normalization:apply(dp.ImageView("bchw", test_set_tensor))
end
else
print("Databases missing, please run createDataSet.lua to build the databases, and load them with dataset.lua")
end