Changeset 422
- Timestamp: 09/08/10 22:54:59
- Location: mystic
- Files: 2 added, 7 edited
Legend:
- Unmodified (no prefix)
- Added ('+')
- Removed ('-')
mystic/examples/test_rosenbrock2.py
(r219 → r422)

@@ -29,7 +29,7 @@
 from mystic.termination import CandidateRelativeTolerance as CRT

 #from scipy.optimize import fmin
 from mystic.scipy_optimize import fmin, NelderMeadSimplexSolver
 #print fmin(rosen,x0,retall=0,full_output=0,maxiter=121)
 solver = NelderMeadSimplexSolver(len(x0))
 solver.SetInitialPoints(x0)
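The script drives mystic's solver-class interface rather than the one-line fmin. A minimal, self-contained sketch of that pattern, assuming default CandidateRelativeTolerance settings; the rosen definition and x0 below are illustrative stand-ins for what the test script sets up itself:

    import numpy
    from mystic.scipy_optimize import NelderMeadSimplexSolver
    from mystic.termination import CandidateRelativeTolerance as CRT

    def rosen(x): # classic Rosenbrock; defined here so the sketch stands alone
        x = numpy.asarray(x)
        return numpy.sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1.0 - x[:-1])**2.0)

    x0 = [0.8, 1.2, 0.7]                      # illustrative starting point
    solver = NelderMeadSimplexSolver(len(x0)) # one dimension per parameter
    solver.SetInitialPoints(x0)
    solver.Solve(rosen, termination=CRT())    # stop on candidate relative tolerance
    print solver.Solution()                   # best parameters found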
mystic/examples/test_rosenbrock3.py
(r219 → r422)

@@ -29,11 +29,11 @@
 from mystic.termination import NormalizedChangeOverGeneration as NCOG

 #from scipy.optimize import fmin_powell
 from mystic.scipy_optimize import fmin_powell, PowellDirectionalSolver
-print fmin_powell(rosen,x0,retall=0,full_output=0,maxiter=14)
+#print fmin_powell(rosen,x0,retall=0,full_output=0)#,maxiter=14)
 solver = PowellDirectionalSolver(len(x0))
 solver.SetInitialPoints(x0)
 solver.SetStrictRanges(min,max)
 #solver.SetEvaluationLimits(maxiter=13)
 solver.enable_signal_handler()
 solver.Solve(rosen,termination=NCOG(tolerance=1e-4),StepMonitor=stepmon,disp=1)
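The same pattern with the Powell solver adds box constraints and a signal handler. A sketch, reusing rosen from the previous example; the bound values are illustrative, and the min/max names mirror the script's own (which shadow the builtins):

    from mystic.scipy_optimize import PowellDirectionalSolver
    from mystic.termination import NormalizedChangeOverGeneration as NCOG

    x0  = [0.8, 1.2, 0.7]
    min = [-2.0, -2.0, -2.0]   # lower bounds, one per parameter (illustrative)
    max = [ 2.0,  2.0,  2.0]   # upper bounds

    solver = PowellDirectionalSolver(len(x0))
    solver.SetInitialPoints(x0)
    solver.SetStrictRanges(min, max)   # keep trial solutions inside the box
    solver.enable_signal_handler()     # Ctrl-C mid-run drops into the option handler
    solver.Solve(rosen, termination=NCOG(tolerance=1e-4), disp=1)
    print solver.Solution()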
mystic/mystic/Make.mm
(r370 → r422)

@@ -28,4 +28,5 @@
 _genSow.py \
 _scipy060optimize.py \
+_scipyoptimize.py \
 abstract_map_solver.py \
 abstract_nested_solver.py \
@@ -35,4 +36,5 @@
 forward_model.py \
 helputil.py \
+linesearch.py \
 mystic_math.py \
 nested.py \
mystic/mystic/_scipy060optimize.py
(r124 → r422)

@@ -6,18 +6,15 @@
 # guarantee implied provided you keep this notice in all copies.
 # *****END NOTICE************
-
-# Minimization routines
-# (removed: fmin_bfgs, fmin_cg)
 """local copy of scipy.optimize"""

-__all__ = ['fmin', 'fmin_powell', 'fmin_ncg',
-           'fminbound','brent', 'golden','bracket','rosen','rosen_der',
-           'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime',
-           'line_search', 'check_grad']
+#__all__ = ['fmin', 'fmin_powell', 'fmin_ncg', 'fmin_cg', 'fmin_bfgs',
+#           'fminbound','brent', 'golden','bracket','rosen','rosen_der',
+#           'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime',
+#           'line_search', 'check_grad']

 import numpy
 from numpy import atleast_1d, eye, mgrid, argmin, zeros, shape, empty, \
      squeeze, isscalar, vectorize, asarray, absolute, sqrt, Inf, asfarray, isinf
-#import linesearch
+import linesearch

 # These have been copied from Numeric's MLab.py
@@ -622,5 +619,4 @@
     return (f2 - f1)/epsilon

-'''
 def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
               epsilon=_epsilon, maxiter=None, full_output=0, disp=1,
@@ -810,6 +806,4 @@
     return retlist
-'''

-'''
 def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,
             maxiter=None, full_output=0, disp=1, retall=0, callback=None):
@@ -981,4 +975,3 @@
     return retlist
-'''

 def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
@@ -2042,31 +2035,30 @@
     algor.append('Powell Direction Set Method.')

-#    print
-#    print "Nonlinear CG"
-#    print "============"
-#    start = time.time()
-#    x = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200)
-#    print x
-#    times.append(time.time() - start)
-#    algor.append('Nonlinear CG \t')
-
-#    print
-#    print "BFGS Quasi-Newton"
-#    print "================="
-#    start = time.time()
-#    x = fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80)
-#    print x
-#    times.append(time.time() - start)
-#    algor.append('BFGS Quasi-Newton\t')
-
-#    print
-#    print "BFGS approximate gradient"
-#    print "========================="
-#    start = time.time()
-#    x = fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100)
-#    print x
-#    times.append(time.time() - start)
-#    algor.append('BFGS without gradient\t')
-
+    print
+    print "Nonlinear CG"
+    print "============"
+    start = time.time()
+    x = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200)
+    print x
+    times.append(time.time() - start)
+    algor.append('Nonlinear CG \t')
+
+    print
+    print "BFGS Quasi-Newton"
+    print "================="
+    start = time.time()
+    x = fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80)
+    print x
+    times.append(time.time() - start)
+    algor.append('BFGS Quasi-Newton\t')
+
+    print
+    print "BFGS approximate gradient"
+    print "========================="
+    start = time.time()
+    x = fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100)
+    print x
+    times.append(time.time() - start)
+    algor.append('BFGS without gradient\t')

     print
@@ -2079,5 +2071,4 @@
     algor.append('Newton-CG with hessian product')

-
     print
     print "Newton-CG with full Hessian"
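Removing the triple-quote fences puts fmin_bfgs and fmin_cg back in service, and the uncommented benchmark blocks exercise them. A sketch of calling them directly, using the rosen and rosen_der helpers this module itself provides; the module path follows the Make.mm listing above, and x0 and the iteration caps mirror the module's benchmark code:

    from mystic._scipy060optimize import fmin_cg, fmin_bfgs, rosen, rosen_der

    x0 = [0.8, 1.2, 0.7]
    xcg   = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200)  # nonlinear conjugate gradient
    xbfgs = fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80) # BFGS quasi-Newton
    xnum  = fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100)       # BFGS, gradient approximated
    print xcg
    print xbfgs
    print xnum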
mystic/mystic/abstract_solver.py
(r336 → r422)

@@ -131,5 +131,5 @@
         import mystic.termination
         self._EARLYEXIT = mystic.termination.EARLYEXIT
         return

     def Solution(self):
@@ -283,5 +283,5 @@
                 return
             else:
-                print "unknown option : %s ",s
+                print "unknown option : %s" % s
                 return
         self.signal_handler = handler
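The one-line fix corrects a common Python 2 print slip: with a comma, the print statement writes the format string literally and then the value after it, while with % the placeholder is actually substituted. A two-line demonstration:

    s = "x"
    print "unknown option : %s ", s   # prints: unknown option : %s  x
    print "unknown option : %s" % s   # prints: unknown option : x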
mystic/mystic/termination.py
(r239 → r422)

@@ -8,4 +8,5 @@
 from numpy import absolute
 abs = absolute
+Inf = numpy.Inf

 # a module level singleton.
@@ -66,3 +67,77 @@
     return _CandidateRelativeTolerance

+def SolutionImprovement(tolerance = 1e-5):
+    """sum of change in each parameter is < tolerance:
+
+sum(abs(last_params - current_params)) <= tolerance"""
+    def _SolutionImprovement(inst):
+        update = inst.bestSolution - inst.trialSolution #XXX: if inf - inf ?
+        answer = numpy.add.reduce(abs(update)) <= tolerance
+        return answer
+    return _SolutionImprovement
+
+def NormalizedCostTarget(fval = None, tolerance = 1e-6, generations = 30):
+    """normalized absolute difference from given cost value is < tolerance:
+(if fval is not provided, then terminate when no improvement over g iterations)
+
+abs(cost[-1] - fval)/fval <= tolerance *or* (cost[-1] - cost[-g]) = 0"""
+    #NOTE: modified from original behavior
+    #  original --> if generations: then return cost[-g] - cost[-1] < 0
+    #           --> else: return fval != 0 and abs((best - fval)/fval) < tol
+    def _NormalizedCostTarget(inst):
+        if generations and fval == None:
+            hist = inst.energy_history
+            lg = len(hist)
+            #XXX: throws error when hist is shorter than generations ?
+            return lg > generations and (hist[-generations]-hist[-1]) < 0
+        if not generations and fval == None: return True
+        return abs(inst.bestEnergy-fval) <= abs(tolerance * fval)
+    return _NormalizedCostTarget
+
+def VTRChangeOverGenerations(ftol = 0.005, gtol = 1e-6, generations = 30):
+    """change in cost is < gtol over a number of generations,
+or cost of last iteration is < ftol:
+
+cost[-g] - cost[-1] < gtol, where g=generations *or* cost[-1] < ftol."""
+    def _VTRChangeOverGenerations(inst):
+        hist = inst.energy_history
+        lg = len(hist)
+        #XXX: throws error when hist is shorter than generations ?
+        return (lg > generations and (hist[-generations]-hist[-1]) < gtol)\
+               or ( hist[-1] < ftol )
+    return _VTRChangeOverGenerations
+
+def PopulationSpread(tolerance = 1e-6):
+    """normalized absolute deviation from best candidate is < tolerance:
+
+abs(params - params[0]) < tolerance"""
+    def _PopulationSpread(inst):
+        sim = numpy.array(inst.population)
+        #if not len(sim[1:]):
+        #    print "Warning: Invalid termination condition (nPop < 2)"
+        #    return True
+        return all(abs(sim - sim[0]) <= abs(tolerance * sim[0]))
+    return _PopulationSpread
+
+def GradientNormTolerance(tolerance = 1e-5, norm = Inf):
+    """gradient norm is < tolerance, given user-supplied norm:
+
+sum( abs(gradient)**norm )**(1.0/norm) < tolerance"""
+    def _GradientNormTolerance(inst):
+        try:
+            gfk = inst.gfk #XXX: need to ensure that gfk is an array ?
+        except:
+            print "Warning: Invalid termination condition (no gradient)"
+            return True
+        if norm == Inf:
+            gnorm = numpy.amax(abs(gfk))
+        elif norm == -Inf:
+            gnorm = numpy.amin(abs(gfk))
+        else: #XXX: throws error when norm = 0.0
+            #XXX: as norm > large, gnorm approaches amax(abs(gfk)) --> then inf
+            #XXX: as norm < -large, gnorm approaches amin(abs(gfk)) --> then -inf
+            gnorm = numpy.sum(abs(gfk)**norm,axis=0)**(1.0/norm)
+        return gnorm <= tolerance
+    return _GradientNormTolerance
+
 # end of file
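Each factory above returns a closure that the solver calls with itself (inst) once per generation, so any solver exposing the attributes it reads (energy_history, bestSolution, and so on) can use it. A sketch wiring one of the new conditions into a solver, with rosen and x0 as in the earlier sketches; that the Nelder-Mead solver maintains energy_history is an assumption here:

    from mystic.scipy_optimize import NelderMeadSimplexSolver
    from mystic.termination import VTRChangeOverGenerations

    solver = NelderMeadSimplexSolver(len(x0))
    solver.SetInitialPoints(x0)
    # stop when cost improves by less than gtol over 30 generations,
    # or when the latest cost falls below ftol
    solver.Solve(rosen, termination=VTRChangeOverGenerations(ftol=0.005, gtol=1e-6, generations=30))
    print solver.bestSolution, solver.bestEnergy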
mystic/mystic/tools.py
(r372 → r422)

@@ -21,4 +21,5 @@
     - CustomSow: A customizable 'n-variable' version of the basic Sow
     - random_seed: sets the seed for calls to 'random()'
+    - wrap_nested: nest a function call within a function object
     - wrap_function: bind an EvaluationMonitor and an evaluation counter
      to a function object
@@ -291,4 +292,15 @@
     return

+def wrap_nested(function, inner_function):
+    """nest a function call within a function object
+
+This is useful for nesting a constraints function in a cost function;
+thus, the constraints will be enforced at every cost function evaluation.
+    """
+    def function_wrapper(x):
+        _x = x[:] #XXX: trouble if x not a list or ndarray... maybe "deepcopy"?
+        return function(inner_function(_x))
+    return function_wrapper
+
 def wrap_function(function, args, EvaluationMonitor):
     """bind an EvaluationMonitor and an evaluation counter
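A sketch of the constraints-in-cost pattern the new docstring describes; the clip-to-a-box constraint and the quadratic cost are illustrative, everything else is the function added above:

    from mystic.tools import wrap_nested

    def constrain(x):   # project parameters into [0, 1] before costing
        return [min(max(xi, 0.0), 1.0) for xi in x]

    def cost(x):
        return sum((xi - 0.5)**2 for xi in x)

    wrapped = wrap_nested(cost, constrain)  # constraints applied at every evaluation
    print wrapped([2.0, -1.0, 0.5])         # same as cost([1.0, 0.0, 0.5]) -> 0.5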
Note: See TracChangeset for help on using the changeset viewer.