Changeset 830
- Timestamp: 09/19/15 11:26:58
- Location: mystic
- Files: 6 edited
Legend (line prefixes in the diffs below):
- Unmodified (no prefix)
- Added (+)
- Removed (-)
mystic/examples/test_svc1.py
--- r827
+++ r830
 from mystic.svctools import *

-# a common objective function for solving a QP problem
-# (see http://www.mathworks.com/help/optim/ug/quadprog.html)
-def objective(x, H, f):
-    return 0.5 * dot(dot(x,H),x) + dot(f,x)
+# define the objective function to match the standard QP solver
+# (see: http://www.mathworks.com/help/optim/ug/quadprog.html)
+# the objective function is very similar to the dual for SVC
+# (see: http://scikit-learn.org/stable/modules/svm.html#svc)
+def objective(x, Q, b):
+    return 0.5 * dot(dot(x,Q),x) + dot(b,x)

+# define the data points for each class
 c1 = array([[0., 0.],[1., 0.],[0.2, 0.2],[0., 1.]])
 c2 = array([[0, 1.1],[1.1, 0.],[0, 1.5],[0.5, 1.2],[0.8, 1.7]])

-# the Kernel Matrix (with the linear kernel)
-# Q = multiply.outer(X,X) # NOTE: requires X is a list of scalars
+# define the full data set
+X = concatenate([c1,c2]); nx = X.shape[0]
+# define the labels (+1 for c1; -1 for c2)
+y = concatenate([ones(c1.shape[0]), -ones(c2.shape[0])]).reshape(1,nx)

+# build the Kernel Matrix (with the linear kernel)
+# get the QP quadratic and linear terms
 XX = concatenate([c1,-c2])
-nx = XX.shape[0]
+Q = KernelMatrix(XX)  # Q_ij = K(x_i, x_j)
+b = -ones(nx)         # b_i = -1 (+1 in the dual maximization)

-# quadratic and linear terms of QP
-Q = KernelMatrix(XX)
-b = -1 * ones(nx)
-
-H = Q
-f = b
-Aeq = concatenate([ones(c1.shape[0]), -ones(c2.shape[0])]).reshape(1,nx)
+# build the constraints (y.T * x = 0.0)
+# 1.0*x0 + 1.0*x1 + ... - 1.0*xN = 0.0
+Aeq = y
 Beq = array([0.])
+# set the bounds
 lb = zeros(nx)
 ub = 99999 * ones(nx)

+# build the constraints operator
 from mystic.symbolic import linear_symbolic, solve, \
      generate_solvers as solvers, generate_constraint as constraint
 ...
 #mon = VerboseMonitor(1)

+# solve the dual for alpha
 from mystic.solvers import diffev
-alpha = diffev(objective, zip(lb,ub), args=(H,f), npop=nx*3, gtol=200, \
+alpha = diffev(objective, zip(lb,ub), args=(Q,b), npop=nx*3, gtol=200, \
 #              itermon=mon, \
                ftol=1e-8, bounds=zip(lb,ub), constraints=conserve, disp=1)
 ...
 print 'solved x: ', alpha
 print "constraint A*x == 0: ", inner(Aeq, alpha)
-print "minimum 0.5*x'Hx + f'*x: ", objective(alpha, H, f)
+print "minimum 0.5*x'Qx + b'*x: ", objective(alpha, Q, b)

-# the labels and the points
-X = concatenate([c1,c2])
-y = concatenate([ones(c1.shape[0]), -ones(c2.shape[0])]).reshape(1,nx)
-
+# calculate weight vectors, support vectors, and bias
 wv = WeightVector(alpha, X, y)
 sv1, sv2 = SupportVectors(alpha, y, eps=1e-6)
 ...

 # plot data
-pylab.plot(c1[:,0], c1[:,1], 'bo')
-pylab.plot(c2[:,0], c2[:,1], 'yo')
+pylab.plot(c1[:,0], c1[:,1], 'bo', markersize=5)
+pylab.plot(c2[:,0], c2[:,1], 'yo', markersize=5)

 # plot hyperplane: wv[0] x + wv[1] y + bias = 0
 ...
 pylab.plot(hx, hy, 'k-')
 #pylab.axis([xmin,xmax,ymin,ymax])
+
+# plot the support points
+pylab.plot(XX[sv1,0], XX[sv1,1], 'bo', markersize=8)
+pylab.plot(-XX[sv2,0], -XX[sv2,1], 'yo', markersize=8)
+#pylab.axis('equal')
 pylab.show()
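The trick behind this revision is worth spelling out: with labels y_i = +1 on c1 and -1 on c2, each row of XX = concatenate([c1,-c2]) is y_i * x_i, so the linear-kernel Gram matrix of the signed points is exactly the SVC dual matrix Q_ij = y_i * y_j * <x_i, x_j>. A minimal NumPy check of that identity (independent of mystic; the variable names are illustrative):

    import numpy as np

    c1 = np.array([[0., 0.], [1., 0.], [0.2, 0.2], [0., 1.]])
    c2 = np.array([[0., 1.1], [1.1, 0.], [0., 1.5], [0.5, 1.2], [0.8, 1.7]])
    X = np.concatenate([c1, c2])
    y = np.concatenate([np.ones(len(c1)), -np.ones(len(c2))])

    # fold the labels into the data: row i of XX is y_i * x_i
    XX = np.concatenate([c1, -c2])

    # Gram matrix of the signed points (linear kernel) ...
    Q_signed = XX.dot(XX.T)
    # ... equals the SVC dual matrix Q_ij = y_i * y_j * <x_i, x_j>
    Q_dual = np.outer(y, y) * X.dot(X.T)
    assert np.allclose(Q_signed, Q_dual)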
mystic/examples/test_svc2.py
--- r827
+++ r830
 import os.path

-# a common objective function for solving a QP problem
-# (see http://www.mathworks.com/help/optim/ug/quadprog.html)
-def objective(x, H, f):
-    return 0.5 * dot(dot(x,H),x) + dot(f,x)
+# define the objective function as the dual for SVC
+# (see: http://scikit-learn.org/stable/modules/svm.html#svc)
+def objective(x, Q, b):
+    return 0.5 * dot(dot(x,Q),x) + dot(b,x)

 # SETTINGS
 ...
 overlap = False  # reduce the distance between the datasets

+# define the data points for each class
 c1 = loadtxt(os.path.join('DATA','g1.pts'))
 c2 = loadtxt(os.path.join('DATA','g2.pts'))
 ...
 if overlap: c1[:,0] += 3  # make the datasets overlap a little

-# the Kernel Matrix (with the linear kernel)
-# Q = multiply.outer(X,X) # NOTE: requires X is a list of scalars
+# define the full data set
+X = concatenate([c1,c2]); nx = X.shape[0]
+# define the labels (+1 for c1; -1 for c2)
+y = concatenate([ones(c1.shape[0]), -ones(c2.shape[0])]).reshape(1,nx)

+# build the Kernel Matrix (with the linear kernel)
+# get the QP quadratic and linear terms
 XX = concatenate([c1,-c2])
-nx = XX.shape[0]
+Q = KernelMatrix(XX)  # Q_ij = K(x_i, x_j)
+b = -ones(nx)         # b_i = -1 (+1 in the dual maximization)

-# quadratic and linear terms of QP
-Q = KernelMatrix(XX)
-b = -1 * ones(nx)
-
-H = Q
-f = b
-Aeq = concatenate([ones(c1.shape[0]), -ones(c2.shape[0])]).reshape(1,nx)
+# build the constraints (y.T * x = 0.0)
+# 1.0*x0 + 1.0*x1 + ... - 1.0*xN = 0.0
+Aeq = y
 Beq = array([0.])
+# set the bounds
 lb = zeros(nx)
 ub = 1 * ones(nx)
 _b = .1 * ones(nx)  # good starting value if most solved xi should be zero

+# build the constraints operator
 from mystic.symbolic import linear_symbolic, solve, \
      generate_solvers as solvers, generate_constraint as constraint
 ...
 mon = VerboseMonitor(10)

+# solve the dual for alpha
 from mystic.solvers import diffev
-alpha = diffev(objective, zip(lb,_b), args=(H,f), npop=nx*3, gtol=200, \
+alpha = diffev(objective, zip(lb,_b), args=(Q,b), npop=nx*3, gtol=200, \
                itermon=mon, \
                ftol=1e-8, bounds=zip(lb,ub), constraints=conserve, disp=1)
 ...
 print 'solved x: ', alpha
 print "constraint A*x == 0: ", inner(Aeq, alpha)
-print "minimum 0.5*x'Hx + f'*x: ", objective(alpha, H, f)
+print "minimum 0.5*x'Qx + b'*x: ", objective(alpha, Q, b)

-# the labels and the points
-X = concatenate([c1,c2])
-y = concatenate([ones(c1.shape[0]), -ones(c2.shape[0])]).reshape(1,nx)
-
+# calculate weight vectors, support vectors, and bias
 wv = WeightVector(alpha, X, y)
 sv1, sv2 = SupportVectors(alpha, y, eps=1e-6)
 ...

 # plot the support points
-pylab.plot(XX[sv1,0], XX[sv1,1], 'b^', markersize=10)
-pylab.plot(-XX[sv2,0], -XX[sv2,1], 'y^', markersize=10)
+pylab.plot(XX[sv1,0], XX[sv1,1], 'bo', markersize=8)
+pylab.plot(-XX[sv2,0], -XX[sv2,1], 'yo', markersize=8)
 #pylab.axis('equal')
 pylab.show()
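After the dual is solved, the script recovers the separating hyperplane with WeightVector, SupportVectors, and Bias from mystic.svctools. For readers without the library, here is a rough sketch of the standard SVC relations those names suggest (not mystic's actual implementation; weight_vector and bias_estimate are illustrative names), assuming alpha is the solved dual vector and y is a flat label vector of +1/-1:

    import numpy as np

    def weight_vector(alpha, X, y):
        # primal weights from the dual solution: w = sum_i alpha_i * y_i * x_i
        return (alpha * y).dot(X)

    def bias_estimate(alpha, X, y, w, eps=1e-6):
        # for any support vector, y_i * (<w, x_i> + b) == 1, so b = y_i - <w, x_i>;
        # averaging over all support vectors is numerically safer
        sv = np.abs(alpha) > eps
        return np.mean(y[sv] - X[sv].dot(w))

wv and the bias then define the hyperplane wv[0]*x + wv[1]*y + bias = 0 that the script plots at the end.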
mystic/examples/test_svr1.py
--- r828
+++ r830
 from mystic.svmtools import *

-# a common objective function for solving a QP problem
-# (see http://www.mathworks.com/help/optim/ug/quadprog.html)
-def objective(x, H, f):
-    return 0.5 * dot(dot(x,H),x) + dot(f,x)
+# define the objective function to match standard QP solver
+# (see: http://www.mathworks.com/help/optim/ug/quadprog.html)
+def objective(x, Q, b):
+    return 0.5 * dot(dot(x,Q),x) + dot(b,x)

-# linear data with scatter
-x = arange(-5, 5.001)
-y = x + 7*random.rand(x.size)
-
-l = x.size
-N = 2*l
+# define the data points (linear data with uniform scatter)
+x = arange(-5, 5.001); nx = x.size
+y = x + 7*random.rand(nx)
+N = 2*nx

-# the Kernel Matrix (with the linear kernel)
+# build the Kernel Matrix (with linear kernel)
+# get the QP quadratic term
 X = concatenate([x,-x])
-Q = multiply.outer(X,X)+1
-
-# the linear term for the QP
+Q = 1 + multiply.outer(X,X)
+# get the QP linear term
 Y = concatenate([y,-y])
 svr_epsilon = 3
 b = Y + svr_epsilon * ones(Y.size)

-H = Q
-f = b
-Aeq = concatenate([ones(l), -ones(l)]).reshape(1,N)
+# build the constraints (y.T * x = 0.0)
+# 1.0*x0 + 1.0*x1 + ... - 1.0*xN = 0.0
+Aeq = concatenate([ones(nx), -ones(nx)]).reshape(1,N)
 Beq = array([0.])
+# set the bounds
 lb = zeros(N)
 ub = zeros(N) + 0.5

+# build the constraints operator
 from mystic.symbolic import linear_symbolic, solve, \
      generate_solvers as solvers, generate_constraint as constraint
 ...
 mon = VerboseMonitor(10)

+# solve for alpha
 from mystic.solvers import diffev
-alpha = diffev(objective, zip(lb,.1*ub), args=(H,f), npop=N*3, gtol=200, \
+alpha = diffev(objective, zip(lb,.1*ub), args=(Q,b), npop=N*3, gtol=200, \
                itermon=mon, \
                ftol=1e-5, bounds=zip(lb,ub), constraints=conserve, disp=1)
 ...
 print 'solved x: ', alpha
 print "constraint A*x == 0: ", inner(Aeq, alpha)
-print "minimum 0.5*x'Hx + f'*x: ", objective(alpha, H, f)
+print "minimum 0.5*x'Qx + b'*x: ", objective(alpha, Q, b)

-sv1 = SupportVectors(alpha[:l])
-sv2 = SupportVectors(alpha[l:])
-
+# calculate support vectors and regression function
+sv1 = SupportVectors(alpha[:nx])
+sv2 = SupportVectors(alpha[nx:])
 R = RegressionFunction(x, y, alpha, svr_epsilon, LinearKernel)
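The stacking X = concatenate([x,-x]) encodes the two sets of SVR dual variables in a single vector: alpha[:nx] plays the role of the "positive" multipliers ap and alpha[nx:] of am, and Aeq enforces sum(ap) - sum(am) = 0. A sketch of the resulting predictor, following the Support Vector expansion quoted in svmtools (f(x) = Sum (ap - am) K(xi, x) + b); predict and linear_kernel are illustrative names, and the bias argument is assumed precomputed (e.g. by svmtools.Bias):

    import numpy as np

    def linear_kernel(a, b):
        return 1. + a * b

    def predict(xnew, x, alpha, bias, kernel=linear_kernel):
        # support vector expansion: f(x) = sum_i (ap_i - am_i) * K(x_i, x) + b
        nx = x.size
        ap, am = alpha[:nx], alpha[nx:]
        return np.sum((ap - am) * kernel(x, xnew)) + bias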
mystic/examples/test_svr2.py
--- r828
+++ r830
 from mystic.svmtools import *

-# a common objective function for solving a QP problem
-# (see http://www.mathworks.com/help/optim/ug/quadprog.html)
-def objective(x, H, f):
-    return 0.5 * dot(dot(x,H),x) + dot(f,x)
+# define the objective function to match standard QP solver
+# (see: http://www.mathworks.com/help/optim/ug/quadprog.html)
+def objective(x, Q, b):
+    return 0.5 * dot(dot(x,Q),x) + dot(b,x)

-# quadratic data plus scatter
-x = arange(-5, 5.001)
-y = x*x + 8*random.rand(x.size)
-
-l = x.size
-N = 2*l
+# define the data points (quadratic data with uniform scatter)
+x = arange(-5, 5.001); nx = x.size
+y = x*x + 8*random.rand(nx)
+N = 2*nx

-# the Kernel Matrix
+# build the Kernel Matrix (with custom k)
+# get the QP quadratic term
 X = concatenate([x,-x])
 def pk(a1,a2):
     return (1+a1*a2)*(1+a1*a2)
 Q = KernelMatrix(X, pk)
-
-# the linear term for the QP
+# get the QP linear term
 Y = concatenate([y,-y])
-svr_epsilon = 4;
+svr_epsilon = 4
 b = Y + svr_epsilon * ones(Y.size)

-H = Q
-f = b
-Aeq = concatenate([ones(l), -ones(l)]).reshape(1,N)
+# build the constraints (y.T * x = 0.0)
+# 1.0*x0 + 1.0*x1 + ... - 1.0*xN = 0.0
+Aeq = concatenate([ones(nx), -ones(nx)]).reshape(1,N)
 Beq = array([0.])
+# set the bounds
 lb = zeros(N)
 ub = zeros(N) + 0.5

+# build the constraints operator
 from mystic.symbolic import linear_symbolic, solve, \
      generate_solvers as solvers, generate_constraint as constraint
 ...
 mon = VerboseMonitor(10)

+# solve for alpha
 from mystic.solvers import diffev
-alpha = diffev(objective, zip(lb,.1*ub), args=(H,f), npop=N*3, gtol=200, \
+alpha = diffev(objective, zip(lb,.1*ub), args=(Q,b), npop=N*3, gtol=400, \
                itermon=mon, \
                ftol=1e-5, bounds=zip(lb,ub), constraints=conserve, disp=1)
 ...
 print 'solved x: ', alpha
 print "constraint A*x == 0: ", inner(Aeq, alpha)
-print "minimum 0.5*x'Hx + f'*x: ", objective(alpha, H, f)
+print "minimum 0.5*x'Qx + b'*x: ", objective(alpha, Q, b)

-sv1 = SupportVectors(alpha[:l])
-sv2 = SupportVectors(alpha[l:])
-
+# calculate support vectors and regression function
+sv1 = SupportVectors(alpha[:nx])
+sv2 = SupportVectors(alpha[nx:])
 R = RegressionFunction(x, y, alpha, svr_epsilon, pk)
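The substantive difference from test_svr1.py is the custom kernel pk, an inhomogeneous degree-2 polynomial, which lets the same linear QP machinery fit quadratic data. Any kernel handed to KernelMatrix should yield a symmetric positive semidefinite Gram matrix; a quick standalone sanity check of pk on this grid (not part of the script):

    import numpy as np

    def pk(a1, a2):
        # inhomogeneous polynomial kernel of degree 2: K(a,b) = (1 + a*b)**2
        return (1 + a1 * a2) ** 2

    x = np.arange(-5, 5.001)
    X = np.concatenate([x, -x])
    Q = np.array([[pk(a, b) for b in X] for a in X])

    assert np.allclose(Q, Q.T)                  # symmetric
    assert np.linalg.eigvalsh(Q).min() > -1e-6  # (numerically) positive semidefinite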
mystic/mystic/svctools.py
--- r826
+++ r830
-def SupportVectors(alpha, y=None, eps = 0):
+def SupportVectors(alpha, y=None, eps=0):
+    """indices of nonzero alphas (at tolerance eps)
+
+    If labels y are provided, then group indices by label
+    """
     import mystic.svmtools as svm
     sv = svm.SupportVectors(alpha,eps)
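The new docstring documents behavior the test scripts already rely on (sv1, sv2 = SupportVectors(alpha, y, eps=1e-6)). A sketch of what that grouping amounts to, written from the documented behavior rather than the library source (support_vectors is an illustrative name):

    import numpy as np

    def support_vectors(alpha, y=None, eps=0):
        # indices of the nonzero alphas (at tolerance eps)
        sv = (np.abs(alpha) > eps).nonzero()[0]
        if y is None:
            return sv
        # group the indices by class label: +1 class first, then -1
        y = np.asarray(y).ravel()
        return sv[y[sv] > 0], sv[y[sv] < 0]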
mystic/mystic/svmtools.py
--- r776
+++ r830
 from numpy import zeros, multiply, ndarray, vectorize, array

-def KernelMatrix(X, k):
+def InnerProduct(i1, i2):
+    return i1 * i2
+
+def LinearKernel(i1, i2):
+    return 1. + i1 * i2
+
+def KernelMatrix(X, k=InnerProduct):
     n = X.size
     Q = zeros((n,n))
 ...

 def SupportVectors(alpha, eps=0):
-    # return index of nonzero alphas (at a tolerance of epsilon)
+    """indices of nonzero alphas (at tolerance eps)"""
     return (abs(alpha)>eps).nonzero()[0]

-def StandardInnerProduct(i1, i2):
-    return i1 * i2
-
-def LinearKernel(i1, i2):
-    return 1. + i1 * i2
-
-def Bias(x, y, alpha, eps, kernel=StandardInnerProduct):
+def Bias(x, y, alpha, eps, kernel=InnerProduct):
     """ Compute regression bias for epsilon insensitive loss regression """
     N = len(alpha)/2
 ...
     return b

-def RegressionFunction(x, y, alpha, eps, kernel=StandardInnerProduct):
+def RegressionFunction(x, y, alpha, eps, kernel=InnerProduct):
     """ The Support Vector expansion. f(x) = Sum (ap - am) K(xi, x) + b """
     bias = Bias(x, y, alpha, eps, kernel)
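With InnerProduct as the new default kernel, KernelMatrix(X) reduces to the plain outer product that earlier revisions of the test scripts computed inline (the old comment "Q = multiply.outer(X,X)"). The body of KernelMatrix is elided in the diff above; the sketch below assumes it fills Q[i,j] = k(X[i], X[j]) and checks that equivalence without importing mystic:

    import numpy as np

    def InnerProduct(i1, i2):
        return i1 * i2

    def KernelMatrix(X, k=InnerProduct):
        # Gram matrix Q[i,j] = k(X[i], X[j]) (assumed body; elided in the diff)
        n = X.size
        Q = np.zeros((n, n))
        for i in range(n):
            for j in range(n):
                Q[i, j] = k(X[i], X[j])
        return Q

    X = np.arange(4.)
    assert np.allclose(KernelMatrix(X), np.multiply.outer(X, X))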