### Support Vector Machine Optimization in Python part 2

https://pythonprogramming.net/svm-optimization-python-2-machine-learning-tutorial/
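This part of the series finishes writing the `fit` method. As a quick orientation (my summary, not wording from the tutorial): the code brute-forces the standard SVM training problem,

    minimize ||w||  subject to  yi(xi.w + b) >= 1  for every training sample (xi, yi)

by stepping w down from a large starting magnitude, testing every sign combination of w and a range of b values against the constraint, saving each satisfying `[w, b]` keyed by `||w||`, and finally keeping the candidate with the smallest `||w||`.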

```
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
style.use('ggplot')

# build SVM class
class Support_Vector_Machine:
    # The __init__ method runs whenever an object is created from the class.
    # self refers to the instance itself, which is how variables are shared
    # across the class, so it is included in every method definition.
    def __init__(self, visualisation=True):
        # set visualisation to whatever the user specifies (defaults to True)
        self.visualisation = visualisation
        # define colours for the two classes 1 & -1
        self.colors = {1: 'r', -1: 'b'}
        # set some standards for the graphs
        if self.visualisation:
            self.fig = plt.figure()
            self.ax = self.fig.add_subplot(1, 1, 1)
    # train
    def fit(self, data):
        # store the data that's passed when the method is called
        self.data = data
        # opt_dict holds candidate solutions as { ||w|| : [w, b] }
        opt_dict = {}
        # every sign combination of w
        transforms = [[1, 1],
                      [-1, 1],
                      [-1, -1],
                      [1, -1]]
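        # (illustrative note, mine, not from the tutorial: with w = [5, 5] the
        # transforms give [5, 5], [-5, 5], [-5, -5] and [5, -5]; all four have
        # the same np.linalg.norm(), so stepping w through one quadrant and
        # applying the transforms covers every quadrant of the search space)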
        # finding values to work with for our ranges
        all_data = []  # placeholder for every feature value
        # step through the data and append each feature to all_data
        for yi in self.data:
            for featureset in self.data[yi]:
                for feature in featureset:
                    all_data.append(feature)
        # next, record the max and min values in the list
        self.max_feature_value = max(all_data)
        self.min_feature_value = min(all_data)
        # free up memory once we've got the values
        all_data = None

        # step sizes for the optimisation, big through to small
        step_sizes = [self.max_feature_value * 0.1,
                      self.max_feature_value * 0.01,
                      # cost starts getting very high after this
                      self.max_feature_value * 0.001]

        # b is extremely expensive to search, so use a coarser range and step than w
        b_range_multiple = 5
        b_multiple = 5
        # starting value for both elements of the vector w
        latest_optimum = self.max_feature_value * 10
        ## begin the stepping process
        for step in step_sizes:
            w = np.array([latest_optimum, latest_optimum])
            # we can just step downhill because the problem is convex
            optimized = False
            while not optimized:
                # we're not optimising b as finely as w (not needed)
                for b in np.arange(-1 * (self.max_feature_value * b_range_multiple),
                                   self.max_feature_value * b_range_multiple,
                                   step * b_multiple):
                    for transformation in transforms:
                        w_t = w * transformation
                        found_option = True
                        # this exhaustive constraint check is the weakest link in
                        # the SVM fundamentally; SMO attempts to fix this a bit
                        # constraint: yi(xi.w+b) >= 1
                        #
                        # #### add a break here later..
                        for i in self.data:
                            for xi in self.data[i]:
                                yi = i
                                # discard w_t, b if any sample violates the constraint
                                if not yi * (np.dot(w_t, xi) + b) >= 1:
                                    found_option = False
                        if found_option:
                            opt_dict[np.linalg.norm(w_t)] = [w_t, b]
                # once w[0] crosses zero we've stepped past the minimum, so we're
                # done with this step size (this breaks out of the while loop)
                if w[0] < 0:
                    optimized = True
                    print('optimised a step')
                else:
                    w = w - step
            # take a list of the magnitudes and sort them lowest to highest
            norms = sorted([n for n in opt_dict])
            # opt_dict is { ||w|| : [w, b] }
            opt_choice = opt_dict[norms[0]]  # smallest magnitude
            self.w = opt_choice[0]  # w from the smallest-magnitude candidate
            self.b = opt_choice[1]  # b from the smallest-magnitude candidate
            # restart the next, smaller step size just above this optimum
            latest_optimum = opt_choice[0][0] + step * 2
    def predict(self, features):
        # classify by sign( x.w+b )
        classification = np.sign(np.dot(np.array(features), self.w) + self.b)
        return classification

# define the data dictionary: class label -> array of featuresets
data_dict = {-1: np.array([[1, 7],
                           [2, 8],
                           [3, 8]]),
              1: np.array([[5, 1],
                           [6, -1],
                           [7, 3]])}
```
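The class isn't exercised in this part of the tutorial, but it can already be run end to end. A minimal sketch, assuming the class and `data_dict` above are in the same file; the test point `[10, -10]` is my own made-up example, chosen to sit well inside the +1 group:

```
svm = Support_Vector_Machine(visualisation=False)  # skip the empty figure for now
svm.fit(data=data_dict)        # prints 'optimised a step' once per step size
print(svm.predict([10, -10]))  # hypothetical point; should print 1.0
```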