Changeset 419


Timestamp: 08/25/10 13:40:01
Author:    mmckerns
Message:   reduce scipy function copies (and likely delay due to importing)
Location:  branches/alta/mystic-0.2a1
Files:     1 added, 5 edited
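The commit message attributes part of the cost of keeping these local scipy copies to import delay. As a quick, illustrative check (not part of this changeset), one can time the heavyweight import that the bundled copies work around:

    import time
    start = time.time()
    import scipy.optimize   # the import whose startup cost the local copies try to avoid
    print("scipy.optimize import took %.3f seconds" % (time.time() - start))

On a cold interpreter this measures the one-time startup cost; repeated imports hit sys.modules and are essentially free.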

  • branches/alta/mystic-0.2a1/Make.mm  (r418 → r419)

    3636    forward_model.py \ 
    3737    helputil.py \ 
     38    linesearch.py \ 
    3839    mystic_math.py \ 
    3940    nested.py \ 
  • branches/alta/mystic-0.2a1/_scipy060optimize.py  (r418 → r419)

    66# guarantee implied provided you keep this notice in all copies. 
    77# *****END NOTICE************ 
    8  
    9 # Minimization routines 
    10 # (removed: fmin_bfgs, fmin_cg) 
    118"""local copy of scipy.optimize""" 
    129 
    13 #__all__ = ['fmin', 'fmin_powell', 'fmin_ncg', 
     10#__all__ = ['fmin', 'fmin_powell', 'fmin_ncg', 'fmin_cg', 'fmin_bfgs', 
    1411#           'fminbound','brent', 'golden','bracket','rosen','rosen_der', 
    1512#           'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime', 
     
    1916from numpy import atleast_1d, eye, mgrid, argmin, zeros, shape, empty, \ 
    2017     squeeze, isscalar, vectorize, asarray, absolute, sqrt, Inf, asfarray, isinf 
    21 #import linesearch 
     18import linesearch 
    2219 
    2320# These have been copied from Numeric's MLab.py 
     
    622619    return (f2 - f1)/epsilon 
    623620 
    624 ''' 
    625621def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, 
    626622              epsilon=_epsilon, maxiter=None, full_output=0, disp=1, 
     
    808804 
    809805    return retlist 
    810 ''' 
    811  
    812 ''' 
     806 
    813807def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, 
    814808              maxiter=None, full_output=0, disp=1, retall=0, callback=None): 
     
    979973 
    980974    return retlist 
    981 ''' 
    982975 
    983976def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, 
     
    20422035    algor.append('Powell Direction Set Method.') 
    20432036 
    2044 #   print 
    2045 #   print "Nonlinear CG" 
    2046 #   print "============" 
    2047 #   start = time.time() 
    2048 #   x = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200) 
    2049 #   print x 
    2050 #   times.append(time.time() - start) 
    2051 #   algor.append('Nonlinear CG     \t') 
    2052  
    2053 #   print 
    2054 #   print "BFGS Quasi-Newton" 
    2055 #   print "=================" 
    2056 #   start = time.time() 
    2057 #   x = fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80) 
    2058 #   print x 
    2059 #   times.append(time.time() - start) 
    2060 #   algor.append('BFGS Quasi-Newton\t') 
    2061  
    2062 #   print 
    2063 #   print "BFGS approximate gradient" 
    2064 #   print "=========================" 
    2065 #   start = time.time() 
    2066 #   x = fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100) 
    2067 #   print x 
    2068 #   times.append(time.time() - start) 
    2069 #   algor.append('BFGS without gradient\t') 
    2070  
     2037    print 
     2038    print "Nonlinear CG" 
     2039    print "============" 
     2040    start = time.time() 
     2041    x = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200) 
     2042    print x 
     2043    times.append(time.time() - start) 
     2044    algor.append('Nonlinear CG     \t') 
     2045 
     2046    print 
     2047    print "BFGS Quasi-Newton" 
     2048    print "=================" 
     2049    start = time.time() 
     2050    x = fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80) 
     2051    print x 
     2052    times.append(time.time() - start) 
     2053    algor.append('BFGS Quasi-Newton\t') 
     2054 
     2055    print 
     2056    print "BFGS approximate gradient" 
     2057    print "=========================" 
     2058    start = time.time() 
     2059    x = fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100) 
     2060    print x 
     2061    times.append(time.time() - start) 
     2062    algor.append('BFGS without gradient\t') 
    20712063 
    20722064    print 
     
    20792071    algor.append('Newton-CG with hessian product') 
    20802072 
    2081  
    20822073    print 
    20832074    print "Newton-CG with full Hessian" 
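The benchmark code restored above exercises fmin_cg and fmin_bfgs on the module's own rosen/rosen_der test functions. For context only, here is a minimal sketch equivalent to the standard Rosenbrock function and its analytic gradient (the module's bundled rosen and rosen_der should be preferred in practice):

    import numpy

    def rosen(x):
        # f(x) = sum(100*(x[i+1] - x[i]**2)**2 + (1 - x[i])**2)
        x = numpy.asarray(x, dtype=float)
        return numpy.sum(100.0*(x[1:] - x[:-1]**2)**2 + (1.0 - x[:-1])**2)

    def rosen_der(x):
        # analytic gradient of rosen
        x = numpy.asarray(x, dtype=float)
        der = numpy.zeros_like(x)
        der[1:-1] = (200.0*(x[1:-1] - x[:-2]**2)
                     - 400.0*x[1:-1]*(x[2:] - x[1:-1]**2)
                     - 2.0*(1.0 - x[1:-1]))
        der[0] = -400.0*x[0]*(x[1] - x[0]**2) - 2.0*(1.0 - x[0])
        der[-1] = 200.0*(x[-1] - x[-2]**2)
        return der

The Newton-CG benchmarks additionally use rosen_hess and rosen_hess_prod, which are defined alongside these in the same module.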
  • branches/alta/mystic-0.2a1/_scipyoptimize.py  (r418 → r419)

    11#!/usr/bin/env python 
    2 #__docformat__ = "restructuredtext en" 
    3 # ******NOTICE*************** 
    4 # optimize.py module by Travis E. Oliphant 
    5 # 
    6 # You may copy and use this module as you see fit with no 
    7 # guarantee implied provided you keep this notice in all copies. 
    8 # *****END NOTICE************ 
    9 """modified local copy of scipy.optimize""" 
    10  
    11 #__all__ = ['fmin', 'fmin_powell', 'fmin_ncg', # 'fmin_cg', 'fmin_bfgs', 
     2"""modified algorithms from local copy of scipy.optimize""" 
     3 
     4#__all__ = ['fmin', 'fmin_powell', 'fmin_ncg', 'fmin_cg', 'fmin_bfgs', 
    125#           'fminbound','brent', 'golden','bracket','rosen','rosen_der', 
    136#           'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime', 
     
    239232    return grad 
    240233 
    241 def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon, 
    242               maxiter=None, full_output=0, disp=1, retall=0, callback=None): 
    243     """Minimize a function with nonlinear conjugate gradient algorithm. 
    244  
    245     :Parameters: 
    246  
    247       f -- the Python function or method to be minimized. 
    248       x0 : ndarray -- the initial guess for the minimizer. 
    249  
    250       fprime -- a function to compute the gradient of f. 
    251       args -- extra arguments to f and fprime. 
    252       gtol : number 
    253         stop when norm of gradient is less than gtol 
    254       norm : number 
    255         order of vector norm to use 
    256       epsilon :number 
    257         if fprime is approximated use this value for 
    258                  the step size (can be scalar or vector) 
    259       callback -- an optional user-supplied function to call after each 
    260                   iteration.  It is called as callback(xk), where xk is the 
    261                   current parameter vector. 
    262  
    263     :Returns: (xopt, {fopt, func_calls, grad_calls, warnflag}, {allvecs}) 
    264  
    265       xopt : ndarray  
    266         the minimizer of f. 
    267       fopt :number 
    268         the value of f(xopt). 
    269       func_calls : number  
    270         the number of function_calls. 
    271       grad_calls : number 
    272         the number of gradient calls. 
    273       warnflag :number  
    274         an integer warning flag: 
    275                   1 : 'Maximum number of iterations exceeded.' 
    276                   2 : 'Gradient and/or function calls not changing' 
    277       allvecs : ndarray 
    278         if retall then this vector of the iterates is returned 
    279  
    280     :OtherParameters: 
    281  
    282       maxiter :number 
    283         the maximum number of iterations. 
    284       full_output : number  
    285         if non-zero then return fopt, func_calls, grad_calls, 
    286                      and warnflag in addition to xopt. 
    287       disp : number 
    288         print convergence message if non-zero. 
    289       retall : number 
    290         return a list of results at each iteration if True 
    291  
    292     :SeeAlso: 
    293  
    294       fmin, fmin_powell, fmin_cg, 
    295              fmin_bfgs, fmin_ncg -- multivariate local optimizers 
    296       leastsq -- nonlinear least squares minimizer 
    297  
    298       fmin_l_bfgs_b, fmin_tnc, 
    299              fmin_cobyla -- constrained multivariate optimizers 
    300  
    301       anneal, brute -- global optimizers 
    302  
    303       fminbound, brent, golden, bracket -- local scalar minimizers 
    304  
    305       fsolve -- n-dimenstional root-finding 
    306  
    307       brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding 
    308  
    309       fixed_point -- scalar fixed-point finder 
    310        
    311     Notes 
    312     --------------------------------------------- 
    313  
    314       Optimize the function, f, whose gradient is given by fprime using the 
    315       nonlinear conjugate gradient algorithm of Polak and Ribiere 
    316       See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 120-122. 
    317       """ 
    318     x0 = asarray(x0).flatten() 
    319     if maxiter is None: 
    320         maxiter = len(x0)*200 
    321     func_calls, f = wrap_function(f, args) 
    322     if fprime is None: 
    323         grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon)) 
    324     else: 
    325         grad_calls, myfprime = wrap_function(fprime, args) 
    326     gfk = myfprime(x0) 
    327     k = 0 
    328     N = len(x0) 
    329     xk = x0 
    330     old_fval = f(xk) 
    331     old_old_fval = old_fval + 5000 
    332  
    333     if retall: 
    334         allvecs = [xk] 
    335     sk = [2*gtol] 
    336     warnflag = 0 
    337     pk = -gfk 
    338     gnorm = vecnorm(gfk,ord=norm) 
    339     while (gnorm > gtol) and (k < maxiter): 
    340         deltak = numpy.dot(gfk,gfk) 
    341  
    342         # These values are modified by the line search, even if it fails 
    343         old_fval_backup = old_fval 
    344         old_old_fval_backup = old_old_fval 
    345  
    346         # NOTE: w/o scipy.optimize.linesearch, results may be noticeably worse. 
    347         try: #XXX: break dependency on scipy.optimize.linesearch 
    348             import scipy.optimize 
    349             alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ 
    350             scipy.optimize.linesearch.line_search(f,myfprime,xk,pk,gfk,\ 
    351                                       old_fval,old_old_fval,c2=0.4) 
    352         except ImportError: 
    353             alpha_k = None 
    354  
    355         if alpha_k is None:  # line search failed -- use different one. 
    356             alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ 
    357                      line_search(f,myfprime,xk,pk,gfk, 
    358                                  old_fval_backup,old_old_fval_backup) 
    359             if alpha_k is None or alpha_k == 0: 
    360                 # This line search also failed to find a better solution. 
    361                 warnflag = 2 
    362                 break 
    363         xk = xk + alpha_k*pk 
    364         if retall: 
    365             allvecs.append(xk) 
    366         if gfkp1 is None: 
    367             gfkp1 = myfprime(xk) 
    368         yk = gfkp1 - gfk 
    369         beta_k = pymax(0,numpy.dot(yk,gfkp1)/deltak) 
    370         pk = -gfkp1 + beta_k * pk 
    371         gfk = gfkp1 
    372         gnorm = vecnorm(gfk,ord=norm) 
    373         if callback is not None: 
    374             callback(xk) 
    375         k += 1 
    376  
    377  
    378     if disp or full_output: 
    379         fval = old_fval 
    380     if warnflag == 2: 
    381         if disp: 
    382             print "Warning: Desired error not necessarily achieved due to precision loss" 
    383             print "         Current function value: %f" % fval 
    384             print "         Iterations: %d" % k 
    385             print "         Function evaluations: %d" % func_calls[0] 
    386             print "         Gradient evaluations: %d" % grad_calls[0] 
    387  
    388     elif k >= maxiter: 
    389         warnflag = 1 
    390         if disp: 
    391             print "Warning: Maximum number of iterations has been exceeded" 
    392             print "         Current function value: %f" % fval 
    393             print "         Iterations: %d" % k 
    394             print "         Function evaluations: %d" % func_calls[0] 
    395             print "         Gradient evaluations: %d" % grad_calls[0] 
    396     else: 
    397         if disp: 
    398             print "Optimization terminated successfully." 
    399             print "         Current function value: %f" % fval 
    400             print "         Iterations: %d" % k 
    401             print "         Function evaluations: %d" % func_calls[0] 
    402             print "         Gradient evaluations: %d" % grad_calls[0] 
    403  
    404  
    405     if full_output: 
    406         retlist = xk, fval, func_calls[0], grad_calls[0], warnflag 
    407         if retall: 
    408             retlist += (allvecs,) 
    409     else: 
    410         retlist = xk 
    411         if retall: 
    412             retlist = (xk, allvecs) 
    413  
    414     return retlist 
    415  
    416 def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, 
    417               epsilon=_epsilon, maxiter=None, full_output=0, disp=1, 
    418               retall=0, callback=None): 
    419     """Minimize a function using the BFGS algorithm. 
    420      
    421     :Parameters: 
    422  
    423       f : the Python function or method to be minimized. 
    424       x0 : ndarray 
    425         the initial guess for the minimizer. 
    426  
    427       fprime : a function to compute the gradient of f. 
    428       args : extra arguments to f and fprime. 
    429       gtol : number 
    430         gradient norm must be less than gtol before succesful termination 
    431       norm : number 
    432         order of norm (Inf is max, -Inf is min) 
    433       epsilon : number 
    434         if fprime is approximated use this value for 
    435                  the step size (can be scalar or vector) 
    436       callback : an optional user-supplied function to call after each 
    437                   iteration.  It is called as callback(xk), where xk is the 
    438                   current parameter vector. 
    439  
    440     :Returns: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>) 
    441  
    442       xopt : ndarray 
    443         the minimizer of f. 
    444  
    445       fopt : number 
    446         the value of f(xopt). 
    447       gopt : ndarray 
    448         the value of f'(xopt).  (Should be near 0) 
    449       Bopt : ndarray 
    450         the value of 1/f''(xopt).  (inverse hessian matrix) 
    451       func_calls : number 
    452         the number of function_calls. 
    453       grad_calls : number 
    454         the number of gradient calls. 
    455       warnflag : integer 
    456                   1 : 'Maximum number of iterations exceeded.' 
    457                   2 : 'Gradient and/or function calls not changing' 
    458       allvecs  :  a list of all iterates  (only returned if retall==1) 
    459  
    460     :OtherParameters: 
    461  
    462       maxiter : number 
    463         the maximum number of iterations. 
    464       full_output : number 
    465         if non-zero then return fopt, func_calls, grad_calls, 
    466                      and warnflag in addition to xopt. 
    467       disp : number 
    468         print convergence message if non-zero. 
    469       retall : number 
    470         return a list of results at each iteration if non-zero 
    471  
    472     :SeeAlso: 
    473  
    474       fmin, fmin_powell, fmin_cg, 
    475              fmin_bfgs, fmin_ncg -- multivariate local optimizers 
    476       leastsq -- nonlinear least squares minimizer 
    477  
    478       fmin_l_bfgs_b, fmin_tnc, 
    479              fmin_cobyla -- constrained multivariate optimizers 
    480  
    481       anneal, brute -- global optimizers 
    482  
    483       fminbound, brent, golden, bracket -- local scalar minimizers 
    484  
    485       fsolve -- n-dimenstional root-finding 
    486  
    487       brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding 
    488  
    489       fixed_point -- scalar fixed-point finder 
    490        
    491     Notes 
    492      
    493     ---------------------------------- 
    494  
    495       Optimize the function, f, whose gradient is given by fprime using the 
    496       quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) 
    497       See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198. 
    498       """ 
    499     x0 = asarray(x0).squeeze() 
    500     if x0.ndim == 0: 
    501         x0.shape = (1,) 
    502     if maxiter is None: 
    503         maxiter = len(x0)*200 
    504     func_calls, f = wrap_function(f, args) 
    505     if fprime is None: 
    506         grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon)) 
    507     else: 
    508         grad_calls, myfprime = wrap_function(fprime, args) 
    509     gfk = myfprime(x0) 
    510     k = 0 
    511     N = len(x0) 
    512     I = numpy.eye(N,dtype=int) 
    513     Hk = I 
    514     old_fval = f(x0) 
    515     old_old_fval = old_fval + 5000 
    516     xk = x0 
    517     if retall: 
    518         allvecs = [x0] 
    519     sk = [2*gtol] 
    520     warnflag = 0 
    521     gnorm = vecnorm(gfk,ord=norm) 
    522     while (gnorm > gtol) and (k < maxiter): 
    523         pk = -numpy.dot(Hk,gfk) 
    524  
    525         # NOTE: w/o scipy.optimize.linesearch, results may be noticeably worse. 
    526         try: #XXX: break dependency on scipy.optimize.linesearch 
    527             import scipy.optimize 
    528             alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ 
    529             scipy.optimize.linesearch.line_search(f,myfprime,xk,pk,gfk, \ 
    530                                       old_fval,old_old_fval) 
    531         except ImportError: 
    532             alpha_k = None 
    533  
    534         if alpha_k is None:  # line search failed try different one. 
    535             alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ 
    536                      line_search(f,myfprime,xk,pk,gfk, 
    537                                  old_fval,old_old_fval) 
    538             if alpha_k is None: 
    539                 # This line search also failed to find a better solution. 
    540                 warnflag = 2 
    541                 break                 
    542         xkp1 = xk + alpha_k * pk 
    543         if retall: 
    544             allvecs.append(xkp1) 
    545         sk = xkp1 - xk 
    546         xk = xkp1 
    547         if gfkp1 is None: 
    548             gfkp1 = myfprime(xkp1) 
    549  
    550         yk = gfkp1 - gfk 
    551         gfk = gfkp1 
    552         if callback is not None: 
    553             callback(xk) 
    554         k += 1 
    555         gnorm = vecnorm(gfk,ord=norm) 
    556         if (gnorm <= gtol): 
    557             break 
    558  
    559         try: # this was handled in numeric, let it remaines for more safety 
    560             rhok = 1.0 / (numpy.dot(yk,sk)) 
    561         except ZeroDivisionError:  
    562             rhok = 1000.0 
    563             print "Divide-by-zero encountered: rhok assumed large" 
    564         if isinf(rhok): # this is patch for numpy 
    565             rhok = 1000.0 
    566             print "Divide-by-zero encountered: rhok assumed large" 
    567         A1 = I - sk[:,numpy.newaxis] * yk[numpy.newaxis,:] * rhok 
    568         A2 = I - yk[:,numpy.newaxis] * sk[numpy.newaxis,:] * rhok 
    569         Hk = numpy.dot(A1,numpy.dot(Hk,A2)) + rhok * sk[:,numpy.newaxis] \ 
    570                  * sk[numpy.newaxis,:] 
    571  
    572     if disp or full_output: 
    573         fval = old_fval 
    574     if warnflag == 2: 
    575         if disp: 
    576             print "Warning: Desired error not necessarily achieved due to precision loss" 
    577             print "         Current function value: %f" % fval 
    578             print "         Iterations: %d" % k 
    579             print "         Function evaluations: %d" % func_calls[0] 
    580             print "         Gradient evaluations: %d" % grad_calls[0] 
    581  
    582     elif k >= maxiter: 
    583         warnflag = 1 
    584         if disp: 
    585             print "Warning: Maximum number of iterations has been exceeded" 
    586             print "         Current function value: %f" % fval 
    587             print "         Iterations: %d" % k 
    588             print "         Function evaluations: %d" % func_calls[0] 
    589             print "         Gradient evaluations: %d" % grad_calls[0] 
    590     else: 
    591         if disp: 
    592             print "Optimization terminated successfully." 
    593             print "         Current function value: %f" % fval 
    594             print "         Iterations: %d" % k 
    595             print "         Function evaluations: %d" % func_calls[0] 
    596             print "         Gradient evaluations: %d" % grad_calls[0] 
    597  
    598     if full_output: 
    599         retlist = xk, fval, gfk, Hk, func_calls[0], grad_calls[0], warnflag 
    600         if retall: 
    601             retlist += (allvecs,) 
    602     else: 
    603         retlist = xk 
    604         if retall: 
    605             retlist = (xk, allvecs) 
    606  
    607     return retlist 
    608  
    609  
    610 def main(): 
    611     import time 
    612  
    613     times = [] 
    614     algor = [] 
    615     x0 = [0.8,1.2,0.7] 
    616     print "Nelder-Mead Simplex" 
    617     print "===================" 
    618     start = time.time() 
    619     x = fmin(rosen,x0) 
    620     print x 
    621     times.append(time.time() - start) 
    622     algor.append('Nelder-Mead Simplex\t') 
    623  
    624     print 
    625     print "Powell Direction Set Method" 
    626     print "===========================" 
    627     start = time.time() 
    628     x = fmin_powell(rosen,x0) 
    629     print x 
    630     times.append(time.time() - start) 
    631     algor.append('Powell Direction Set Method.') 
    632  
    633     print 
    634     print "Nonlinear CG" 
    635     print "============" 
    636     start = time.time() 
    637     x = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200) 
    638     print x 
    639     times.append(time.time() - start) 
    640     algor.append('Nonlinear CG     \t') 
    641  
    642     print 
    643     print "BFGS Quasi-Newton" 
    644     print "=================" 
    645     start = time.time() 
    646     x = fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80) 
    647     print x 
    648     times.append(time.time() - start) 
    649     algor.append('BFGS Quasi-Newton\t') 
    650  
    651     print 
    652     print "BFGS approximate gradient" 
    653     print "=========================" 
    654     start = time.time() 
    655     x = fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100) 
    656     print x 
    657     times.append(time.time() - start) 
    658     algor.append('BFGS without gradient\t') 
    659  
    660     print 
    661     print "Newton-CG with Hessian product" 
    662     print "==============================" 
    663     start = time.time() 
    664     x = fmin_ncg(rosen, x0, rosen_der, fhess_p=rosen_hess_prod, maxiter=80) 
    665     print x 
    666     times.append(time.time() - start) 
    667     algor.append('Newton-CG with hessian product') 
    668  
    669     print 
    670     print "Newton-CG with full Hessian" 
    671     print "===========================" 
    672     start = time.time() 
    673     x = fmin_ncg(rosen, x0, rosen_der, fhess=rosen_hess, maxiter=80) 
    674     print x 
    675     times.append(time.time() - start) 
    676     algor.append('Newton-CG with full hessian') 
    677  
    678     print 
    679     print "\nMinimizing the Rosenbrock function of order 3\n" 
    680     print " Algorithm \t\t\t       Seconds" 
    681     print "===========\t\t\t      =========" 
    682     for k in range(len(algor)): 
    683         print algor[k], "\t -- ", times[k] 
    684234 
    685235if __name__ == "__main__": 
    686     main() 
     236    pass 
    687237 
    688238# End of file 
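The fmin_cg and fmin_bfgs bodies removed above are the standard scipy implementations. Their two numerical kernels can be restated compactly; the sketch below is illustrative only (it uses numpy.outer where the original code uses newaxis broadcasting) and shows the Polak-Ribiere coefficient from the deleted fmin_cg and the inverse-Hessian update from the deleted fmin_bfgs:

    import numpy

    def polak_ribiere_beta(gfk, gfkp1):
        # PR+ coefficient from the removed fmin_cg:
        #   beta_k = max(0, (g_{k+1} - g_k) . g_{k+1} / (g_k . g_k))
        yk = gfkp1 - gfk
        return max(0.0, numpy.dot(yk, gfkp1) / numpy.dot(gfk, gfk))

    def bfgs_hessian_update(Hk, sk, yk):
        # Inverse-Hessian update from the removed fmin_bfgs:
        #   H_{k+1} = (I - rho*s*y^T) H_k (I - rho*y*s^T) + rho*s*s^T,  rho = 1/(y^T s)
        I = numpy.eye(len(sk))
        rhok = 1.0 / numpy.dot(yk, sk)
        A1 = I - rhok * numpy.outer(sk, yk)
        A2 = I - rhok * numpy.outer(yk, sk)
        return numpy.dot(A1, numpy.dot(Hk, A2)) + rhok * numpy.outer(sk, sk)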
  • branches/alta/mystic-0.2a1/scipy_bfgs.py  (r418 → r419)

    6060 
    6161from _scipyoptimize import zoom, _cubicmin, _quadmin, _epsilon 
    62 from _scipyoptimize import line_search, approx_fprime 
     62from _scipyoptimize import linesearch, line_search, approx_fprime 
    6363 
    6464########################################################################## 
     
    217217            pk = -numpy.dot(Hk,gfk) 
    218218 
    219             # If user has scipy installed, use that line search (since line search requires 
    220             # minpack.so). If scipy is not installed, use 
    221             # the function line_search (defined above, in helper functions) 
    222             try: 
    223                 import scipy.optimize 
    224                 alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ 
    225                 scipy.optimize.linesearch.line_search(func,myfprime,xk,pk,gfk, 
    226                                       old_fval,old_old_fval) 
    227             except ImportError: 
    228                 alpha_k = None 
    229             #XXX If it gets stuck in the linesearch in scipy, 'exit' in the signal  
    230             # handler does not work. 
     219            alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ 
     220                             linesearch.line_search(func,myfprime,xk,pk,gfk, \ 
     221                             old_fval,old_old_fval) 
     222            #XXX If it gets stuck in the linesearch in scipy, 
     223            # 'exit' in the signal handler does not work. 
    231224            #alpha_k = None 
    232225 
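The block removed above implemented an optional-dependency fallback: as its comment explains, scipy's compiled line search (which needs minpack.so) was preferred when scipy could be imported, with the pure-Python line_search helper as a fallback. r419 drops that runtime probe and calls the bundled linesearch module directly. For readers who want the old behaviour in their own code, a hedged sketch of the removed pattern (illustrative only, using the module paths of scipy versions contemporary with this branch):

    def pick_line_search():
        # Prefer scipy's compiled line search when a scipy install is present;
        # otherwise fall back to the pure-Python helper bundled with this branch.
        try:
            from scipy.optimize import linesearch   # requires scipy (and its minpack extension)
            return linesearch.line_search
        except ImportError:
            from _scipyoptimize import line_search  # local fallback used before r419
            return line_search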
  • branches/alta/mystic-0.2a1/scipy_cg.py  (r418 → r419)

    5858 
    5959from _scipyoptimize import zoom, _cubicmin, _quadmin, _epsilon 
    60 from _scipyoptimize import line_search, approx_fprime 
     60from _scipyoptimize import linesearch, line_search, approx_fprime 
    6161 
    6262########################################################################## 
     
    225225            old_old_fval_backup = old_old_fval 
    226226 
    227            # NOTE: Without scipy.optimize's linesearch, results may be noticeably worse. 
    228             try:   
    229                 import scipy.optimize 
    230                 alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ 
    231                 scipy.optimize.linesearch.line_search(func,myfprime,xk,pk,gfk,\ 
    232                                       old_fval,old_old_fval,c2=0.4) 
    233             except ImportError: 
    234                 alpha_k = None 
    235             #XXX Signal handler 'exit' won't work if it gets stuck inside scipy linesearch 
     227            alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \ 
     228                             linesearch.line_search(func,myfprime,xk,pk,gfk,\ 
     229                             old_fval,old_old_fval,c2=0.4) 
     230            #XXX Signal handler 'exit' won't work 
     231            # if it gets stuck inside scipy linesearch 
    236232            #alpha_k = None 
    237233 
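Both rewritten call sites hand the bundled line search a curvature parameter (c2=0.4 in scipy_cg.py). The line search is expected to return a step length satisfying the strong Wolfe conditions; the sketch below (illustrative, not part of the changeset) spells out that acceptance test for a candidate step alpha:

    import numpy

    def satisfies_strong_wolfe(f, fprime, xk, pk, alpha, c1=1e-4, c2=0.4):
        # Sufficient decrease:  f(x + a*p) <= f(x) + c1*a*grad_f(x).p
        # Strong curvature:    |grad_f(x + a*p).p| <= c2*|grad_f(x).p|
        phi0  = f(xk)
        dphi0 = numpy.dot(fprime(xk), pk)
        phia  = f(xk + alpha*pk)
        dphia = numpy.dot(fprime(xk + alpha*pk), pk)
        return (phia <= phi0 + c1*alpha*dphi0) and (abs(dphia) <= c2*abs(dphi0))

A smaller c2 (such as the 0.4 used here, versus the common 0.9 default) forces a more accurate line search, which nonlinear CG generally benefits from.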