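"""ml25.py: a linear, hard-margin SVM written from scratch; training is a
brute-force convex search over w and b (Python tutorial code)."""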
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np

style.use('ggplot')


class Support_Vector_Machine:
    def __init__(self, visualization=True):
        self.visualization = visualization
        self.colors = {1: 'r', -1: 'b'}
        if self.visualization:
            self.fig = plt.figure()
            self.ax = self.fig.add_subplot(1, 1, 1)
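
    # fit() below runs a brute-force search: start w at a large symmetric
    # value, shrink it with progressively finer steps, sweep b over a fixed
    # range, and keep the smallest ||w|| that classifies every training
    # point correctly, i.e. satisfies yi*(xi.w + b) >= 1.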
    # train
    def fit(self, data):
        self.data = data
        # {||w||: [w, b]}
        opt_dict = {}

        # Every sign combination of w's components; ||w_t|| is identical
        # for all four, but the constraint check is not.
        transforms = [[1, 1],
                      [-1, 1],
                      [-1, -1],
                      [1, -1]]

        all_data = []
        for yi in self.data:
            for featureset in self.data[yi]:
                for feature in featureset:
                    all_data.append(feature)
        self.max_feature_value = max(all_data)
        self.min_feature_value = min(all_data)
        all_data = None
        # support vectors satisfy yi*(xi.w + b) = 1
        # coarse-to-fine step sizes, scaled by the data's max feature value
        step_sizes = [self.max_feature_value * 0.1,
                      self.max_feature_value * 0.01,
                      # point of calculation expense:
                      self.max_feature_value * 0.001]
                      # self.max_feature_value * 0.0001]
        # Sample run output: the final yi*(xi.w + b) value per training
        # point; values near 1 mark the support vectors on the margin:
        #   [1 7] : 1.2224    [2 8] : 1.2224    [3 8] : 1.0000
        #   [5 1] : 1.0016    [6 -1] : 1.6688   [7 3] : 1.0016
# extremely expensive
b_range_multiple = 2
# we dont need to take as small step of steps
# with b as we do magnitude of vector w
b_multiple = 5
latest_optimum = self.max_feature_value * 10
        for step in step_sizes:
            w = np.array([latest_optimum, latest_optimum])
            # We can stop once w crosses zero: the problem is convex,
            # so any minimum found is the global one.
            optimized = False
            while not optimized:
                for b in np.arange(-1 * (self.max_feature_value * b_range_multiple),
                                   self.max_feature_value * b_range_multiple,
                                   step * b_multiple):
                    for transformation in transforms:
                        w_t = w * transformation
                        found_option = True
                        # Weakest link in the SVM fundamentally: every
                        # training point is rechecked against the
                        # constraint yi*(xi.w + b) >= 1.
                        # SMO attempts to fix this a bit.
                        for i in self.data:
                            for xi in self.data[i]:
                                yi = i
                                if not yi * (np.dot(w_t, xi) + b) >= 1:
                                    found_option = False
                                    # print(xi, ':', yi*(np.dot(w_t, xi)+b))
                        if found_option:
                            opt_dict[np.linalg.norm(w_t)] = [w_t, b]
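
                        # A vectorized sketch of the same feasibility
                        # check (hypothetical X: stacked points, y:
                        # matching labels), replacing the nested loops
                        # with one matrix product:
                        #   feasible = np.all(y * (X @ w_t + b) >= 1)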

                if w[0] < 0:
                    optimized = True
                    print('Optimized a step.')
                else:
                    # take a step down the vector
                    w = w - step
            norms = sorted([n for n in opt_dict])
            # opt_dict is {||w||: [w, b]}; the smallest norm gives the widest margin
            opt_choice = opt_dict[norms[0]]
            self.w = opt_choice[0]
            self.b = opt_choice[1]
            # restart the next, finer pass just above this optimum
            latest_optimum = opt_choice[0][0] + step * 2

        # Print the optimum value yi*(xi.w + b) found for each point.
        for i in self.data:
            for xi in self.data[i]:
                yi = i
                print(xi, ':', yi * (np.dot(self.w, xi) + self.b))
    def predict(self, features):
        # sign(x.w + b)
        classification = np.sign(np.dot(np.array(features), self.w) + self.b)
        if classification != 0 and self.visualization:
            self.ax.scatter(features[0], features[1], s=200,
                            marker='*', color=self.colors[classification])
        return classification
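
    # A batch predict is a one-line sketch (hypothetical feature_matrix,
    # one sample per row):
    #   np.sign(feature_matrix @ self.w + self.b)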
    def visualize(self):
        [[self.ax.scatter(x[0], x[1], s=100, color=self.colors[i])
          for x in data_dict[i]] for i in data_dict]

        # hyperplane values v = x.w + b:
        #   positive support vector: v = 1
        #   negative support vector: v = -1
        #   decision boundary:       v = 0
        def hyperplane(x, w, b, v):
            return (-w[0] * x - b + v) / w[1]
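
        # Derivation: a point (x, y) lies on the line when
        # w[0]*x + w[1]*y + b = v, so y = (-w[0]*x - b + v) / w[1].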

        datarange = (self.min_feature_value * 0.9,
                     self.max_feature_value * 1.1)
        hyp_x_min = datarange[0]
        hyp_x_max = datarange[1]

        # (w.x+b) = 1: positive support vector hyperplane
        psv1 = hyperplane(hyp_x_min, self.w, self.b, 1)
        psv2 = hyperplane(hyp_x_max, self.w, self.b, 1)
        self.ax.plot([hyp_x_min, hyp_x_max], [psv1, psv2], self.colors[1])

        # (w.x+b) = -1: negative support vector hyperplane
        nsv1 = hyperplane(hyp_x_min, self.w, self.b, -1)
        nsv2 = hyperplane(hyp_x_max, self.w, self.b, -1)
        self.ax.plot([hyp_x_min, hyp_x_max], [nsv1, nsv2], self.colors[-1])

        # (w.x+b) = 0: decision boundary
        db1 = hyperplane(hyp_x_min, self.w, self.b, 0)
        db2 = hyperplane(hyp_x_max, self.w, self.b, 0)
        self.ax.plot([hyp_x_min, hyp_x_max], [db1, db2], 'y--')

        plt.show()


data_dict = {-1: np.array([[1, 7],
                           [2, 8],
                           [3, 8]]),
             1: np.array([[5, 1],
                          [6, -1],
                          [7, 3]])}

# Quick scatter check of the raw data, kept for reference:
# [[plt.scatter(ii[0], ii[1], color='b' if i == -1 else 'r') for ii in data_dict.get(i)] for i in data_dict]
# plt.show()

svm = Support_Vector_Machine()
svm.fit(data=data_dict)

predict_us = [[0, 10],
              [1, 3],
              [3, 4],
              [3, 5],
              [5, 5],
              [5, 6],
              [6, -5],
              [5, 8]]

for p in predict_us:
    svm.predict(p)

svm.visualize()
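
# --- Optional cross-check: a sketch, assuming scikit-learn is available ---
# sklearn's linear SVC solves the same max-margin problem, so its w and b
# should come out close to the values printed by fit() above. Uncomment to
# compare (sk_svm, X, y, clf are illustrative names, not from the original):
# from sklearn import svm as sk_svm
# X = np.vstack([data_dict[-1], data_dict[1]])
# y = np.array([-1] * len(data_dict[-1]) + [1] * len(data_dict[1]))
# clf = sk_svm.SVC(kernel='linear', C=1e6)  # large C approximates a hard margin
# clf.fit(X, y)
# print('sklearn w, b:', clf.coef_[0], clf.intercept_[0])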