Changeset 422


Timestamp: 09/08/10 22:54:59
Author: mmckerns
Message:

added several new termination conditions;
added tools.wrap_nested function;
uncommented local copies of scipy gradient solvers;
minor bugfix in abstract_solver warning

Location: mystic
Files: 2 added, 7 edited

  • mystic/examples/test_rosenbrock2.py

r219 → r422

     from mystic.termination import CandidateRelativeTolerance as CRT

-    from scipy.optimize import fmin
+    #from scipy.optimize import fmin
     from mystic.scipy_optimize import fmin, NelderMeadSimplexSolver
-    print fmin(rosen,x0,retall=0,full_output=0,maxiter=121)
+    #print fmin(rosen,x0,retall=0,full_output=0,maxiter=121)
     solver = NelderMeadSimplexSolver(len(x0))
     solver.SetInitialPoints(x0)
  • mystic/examples/test_rosenbrock3.py

r219 → r422

     from mystic.termination import NormalizedChangeOverGeneration as NCOG

-    from scipy.optimize import fmin_powell
+    #from scipy.optimize import fmin_powell
     from mystic.scipy_optimize import fmin_powell, PowellDirectionalSolver
-    print fmin_powell(rosen,x0,retall=0,full_output=0,maxiter=14)
+    #print fmin_powell(rosen,x0,retall=0,full_output=0)#,maxiter=14)
     solver = PowellDirectionalSolver(len(x0))
     solver.SetInitialPoints(x0)
     solver.SetStrictRanges(min,max)
-    solver.SetEvaluationLimits(maxiter=13)
+    #solver.SetEvaluationLimits(maxiter=13)
     solver.enable_signal_handler()
     solver.Solve(rosen,termination=NCOG(tolerance=1e-4),StepMonitor=stepmon,disp=1)
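
The edited script now relies on NCOG termination rather than a hard maxiter cap. A minimal sketch of that pattern, assuming stepmon is an instance of the basic Sow monitor from mystic.tools (that import path is an assumption; the solver calls match the diff above, and the inline rosen is a stand-in for the test script's model):

    from mystic.scipy_optimize import PowellDirectionalSolver
    from mystic.termination import NormalizedChangeOverGeneration as NCOG
    from mystic.tools import Sow  # assumed location of the basic monitor

    def rosen(x):  # 2-D Rosenbrock, standing in for the test script's model
        return 100.*(x[1] - x[0]**2)**2 + (1. - x[0])**2

    x0 = [0.8, 1.2]
    stepmon = Sow()
    solver = PowellDirectionalSolver(len(x0))
    solver.SetInitialPoints(x0)
    solver.Solve(rosen, termination=NCOG(tolerance=1e-4), StepMonitor=stepmon, disp=1)
    print solver.Solution()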
  • mystic/mystic/Make.mm

r370 → r422

     _genSow.py \
     _scipy060optimize.py \
+    _scipyoptimize.py \
     abstract_map_solver.py \
     abstract_nested_solver.py \
…
     forward_model.py \
     helputil.py \
+    linesearch.py \
     mystic_math.py \
     nested.py \
  • mystic/mystic/_scipy060optimize.py

r124 → r422

 # guarantee implied provided you keep this notice in all copies.
 # *****END NOTICE************
-
-# Minimization routines
-# (removed: fmin_bfgs, fmin_cg)
 """local copy of scipy.optimize"""

-__all__ = ['fmin', 'fmin_powell', 'fmin_ncg',
-           'fminbound','brent', 'golden','bracket','rosen','rosen_der',
-           'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime',
-           'line_search', 'check_grad']
+#__all__ = ['fmin', 'fmin_powell', 'fmin_ncg', 'fmin_cg', 'fmin_bfgs',
+#           'fminbound','brent', 'golden','bracket','rosen','rosen_der',
+#           'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime',
+#           'line_search', 'check_grad']

 import numpy
 from numpy import atleast_1d, eye, mgrid, argmin, zeros, shape, empty, \
      squeeze, isscalar, vectorize, asarray, absolute, sqrt, Inf, asfarray, isinf
-#import linesearch
+import linesearch

 # These have been copied from Numeric's MLab.py
…
     return (f2 - f1)/epsilon

-'''
 def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
               epsilon=_epsilon, maxiter=None, full_output=0, disp=1,
…
     return retlist
-'''
-
-'''
+
 def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,
               maxiter=None, full_output=0, disp=1, retall=0, callback=None):
…
     return retlist
-'''

 def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
…
     algor.append('Powell Direction Set Method.')

-#   print
-#   print "Nonlinear CG"
-#   print "============"
-#   start = time.time()
-#   x = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200)
-#   print x
-#   times.append(time.time() - start)
-#   algor.append('Nonlinear CG     \t')
-
-#   print
-#   print "BFGS Quasi-Newton"
-#   print "================="
-#   start = time.time()
-#   x = fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80)
-#   print x
-#   times.append(time.time() - start)
-#   algor.append('BFGS Quasi-Newton\t')
-
-#   print
-#   print "BFGS approximate gradient"
-#   print "========================="
-#   start = time.time()
-#   x = fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100)
-#   print x
-#   times.append(time.time() - start)
-#   algor.append('BFGS without gradient\t')
-
+    print
+    print "Nonlinear CG"
+    print "============"
+    start = time.time()
+    x = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200)
+    print x
+    times.append(time.time() - start)
+    algor.append('Nonlinear CG     \t')
+
+    print
+    print "BFGS Quasi-Newton"
+    print "================="
+    start = time.time()
+    x = fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80)
+    print x
+    times.append(time.time() - start)
+    algor.append('BFGS Quasi-Newton\t')
+
+    print
+    print "BFGS approximate gradient"
+    print "========================="
+    start = time.time()
+    x = fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100)
+    print x
+    times.append(time.time() - start)
+    algor.append('BFGS without gradient\t')

     print
…
     algor.append('Newton-CG with hessian product')

-
     print
     print "Newton-CG with full Hessian"
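
With the '''-quoting removed, the local copies of the gradient solvers are live again. A minimal sketch of calling them directly, assuming the module is importable as mystic._scipy060optimize (path inferred from the Make.mm listing above) and using the rosen helpers from its __all__ list:

    from mystic._scipy060optimize import fmin_bfgs, fmin_cg, rosen, rosen_der

    x0 = [0.8, 1.2, 0.7]
    # quasi-Newton with an analytic gradient, as in the restored benchmark block
    xb = fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80, disp=0)
    # nonlinear conjugate gradient from the same start point
    xc = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200, disp=0)
    print xb
    print xc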
  • mystic/mystic/abstract_solver.py

r336 → r422

         import mystic.termination
         self._EARLYEXIT       = mystic.termination.EARLYEXIT
-
+        return

     def Solution(self):
…
                     return
                 else:
-                    print "unknown option : %s ", s
+                    print "unknown option : %s" % s
             return
         self.signal_handler = handler
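
The warning fix matters because a comma in a Python 2 print statement passes the value as a separate item rather than filling the placeholder; the % operator performs the substitution. To illustrate:

    s = 'x'
    print "unknown option : %s ", s   # before: unknown option : %s  x
    print "unknown option : %s" % s   # after:  unknown option : x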
  • mystic/mystic/termination.py

r239 → r422

 from numpy import absolute
 abs = absolute
+Inf = numpy.Inf

 # a module level singleton.
…
     return _CandidateRelativeTolerance

+def SolutionImprovement(tolerance = 1e-5):
+    """sum of change in each parameter is < tolerance:
+
+sum(abs(last_params - current_params)) <= tolerance"""
+    def _SolutionImprovement(inst):
+        update = inst.bestSolution - inst.trialSolution #XXX: if inf - inf ?
+        answer = numpy.add.reduce(abs(update)) <= tolerance
+        return answer
+    return _SolutionImprovement
+
+def NormalizedCostTarget(fval = None, tolerance = 1e-6, generations = 30):
+    """normalized absolute difference from given cost value is < tolerance:
+(if fval is not provided, then terminate when no improvement over g iterations)
+
+abs(cost[-1] - fval)/fval <= tolerance *or* (cost[-1] - cost[-g]) = 0"""
+    #NOTE: modified from original behavior
+    #  original --> if generations: then return cost[-g] - cost[-1] < 0
+    #           --> else: return fval != 0 and abs((best - fval)/fval) < tol
+    def _NormalizedCostTarget(inst):
+         if generations and fval == None:
+             hist = inst.energy_history
+             lg = len(hist)
+             #XXX: throws error when hist is shorter than generations ?
+             return lg > generations and (hist[-generations]-hist[-1]) < 0
+         if not generations and fval == None: return True
+         return abs(inst.bestEnergy-fval) <= abs(tolerance * fval)
+    return _NormalizedCostTarget
+
+def VTRChangeOverGenerations(ftol = 0.005, gtol = 1e-6, generations = 30):
+    """change in cost is < gtol over a number of generations,
+or cost of last iteration is < ftol:
+
+cost[-g] - cost[-1] < gtol, where g=generations *or* cost[-1] < ftol."""
+    def _VTRChangeOverGenerations(inst):
+         hist = inst.energy_history
+         lg = len(hist)
+         #XXX: throws error when hist is shorter than generations ?
+         return (lg > generations and (hist[-generations]-hist[-1]) < gtol)\
+                or ( hist[-1] < ftol )
+    return _VTRChangeOverGenerations
+
+def PopulationSpread(tolerance=1e-6):
+    """normalized absolute deviation from best candidate is < tolerance:
+
+abs(params - params[0]) < tolerance"""
+    def _PopulationSpread(inst):
+         sim = numpy.array(inst.population)
+         #if not len(sim[1:]):
+         #    print "Warning: Invalid termination condition (nPop < 2)"
+         #    return True
+         return all(abs(sim - sim[0]) <= abs(tolerance * sim[0]))
+    return _PopulationSpread
+
+def GradientNormTolerance(tolerance=1e-5, norm=Inf):
+    """gradient norm is < tolerance, given user-supplied norm:
+
+sum( abs(gradient)**norm )**(1.0/norm) < tolerance"""
+    def _GradientNormTolerance(inst):
+        try:
+            gfk = inst.gfk #XXX: need to ensure that gfk is an array ?
+        except:
+            print "Warning: Invalid termination condition (no gradient)"
+            return True
+        if norm == Inf:
+            gnorm = numpy.amax(abs(gfk))
+        elif norm == -Inf:
+            gnorm = numpy.amin(abs(gfk))
+        else: #XXX: throws error when norm = 0.0
+           #XXX: as norm > large, gnorm approaches amax(abs(gfk)) --> then inf
+           #XXX: as norm < -large, gnorm approaches amin(abs(gfk)) --> then -inf
+            gnorm = numpy.sum(abs(gfk)**norm,axis=0)**(1.0/norm)
+        return gnorm <= tolerance
+    return _GradientNormTolerance
+
 # end of file
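
Each new condition follows the factory pattern already used in termination.py: calling it with tolerances returns a callable that inspects the solver instance. A minimal sketch of plugging one in, reusing the NelderMeadSimplexSolver API shown in the test scripts above (the inline rosen is a stand-in cost function):

    from mystic.scipy_optimize import NelderMeadSimplexSolver
    from mystic.termination import VTRChangeOverGenerations

    def rosen(x):  # 2-D Rosenbrock, standing in for a real cost function
        return 100.*(x[1] - x[0]**2)**2 + (1. - x[0])**2

    solver = NelderMeadSimplexSolver(2)
    solver.SetInitialPoints([0.8, 1.2])
    # stop when cost stalls over 30 generations, or the last cost is < ftol
    solver.Solve(rosen, termination=VTRChangeOverGenerations(ftol=0.005))
    print solver.Solution()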
  • mystic/mystic/tools.py

r372 → r422

     - CustomSow: A customizable 'n-variable' version of the basic Sow
     - random_seed: sets the seed for calls to 'random()'
+    - wrap_nested: nest a function call within a function object
     - wrap_function: bind an EvaluationMonitor and an evaluation counter
         to a function object
…
     return

+def wrap_nested(function, inner_function):
+    """nest a function call within a function object
+
+This is useful for nesting a constraints function in a cost function;
+thus, the constraints will be enforced at every cost function evaluation.
+    """
+    def function_wrapper(x):
+        _x = x[:] #XXX: trouble if x not a list or ndarray... maybe "deepcopy"?
+        return function(inner_function(_x))
+    return function_wrapper
+
 def wrap_function(function, args, EvaluationMonitor):
     """bind an EvaluationMonitor and an evaluation counter