bigoptimize
index
/.automount/ioasun3/root/disk/users/kybic/work/python_volumes/bigoptimize.py

 Module bigoptimize implements multidimensional nonlinear optimization
technique and some prototype problems
 
Jan Kybic, October 2000
$Id: bigoptimize.html,v 1.1 2002/08/26 14:11:15 jkybic Exp $

 
Modules
                      
BIGsplines
Gnuplot
LinearAlgebra
bigmultigrid
bigpyramid
bigregister
bigsplines
bigtools
copy
copy_reg
math
multiarray
pickle
string
time
types

 
Classes
                      
EvaluableProblem
CorrectedMultigridableProblem
QCorrectedMultigridableProblem
WCorrectedMultigridableProblem
HarmonicProblem
MultigridableProblem(HarmonicProblem, SamplingReducer)
MROptimizerState
MultigridOptimizer
OptimizerGdes
OptimizerGdesP
OptimizerGdesQ
OptimizerML
OptimizerMLH
OptimizerMLdH
SimpleReducer
SamplingReducer
SplineReducer

 
class CorrectedMultigridableProblem(EvaluableProblem)
            Take a MultigridableProblem and make another multigridable problem
out of it. A correction term is added so that the fixed point of the fine
grid solution is a fixed point of the coarse grid solution, too.
 
The correction comes from Bouman ICIP99 paper and is:
 
E'=E-<r,x>
where r= grad_x1 E1(R(x0)) - R grad_x0 E0(x0)
 
This implementation inherits: updateFine
It encapsulates the rest of the EvaluableProblem routines
 
  
__init__(self, p)
 p is a MultigridableProblem instance which we will encapsulate
canReduce(self)
dHerr(self, x, h=9.9999999999999995e-07) from EvaluableProblem
gerr(self, x, h=9.9999999999999995e-07) from EvaluableProblem
getE(self, x)
 Return corrected criterion
getEg(self, x)
 Return corrected criterion and gradient
getEgDh(self, x)
getInitialX(self)
getReduced(self, x)
 returns (pc,xc), the coarsened problem and the coarsened x
numDh(self, x, h=9.9999999999999995e-07) from EvaluableProblem
numg(self, x, h=9.9999999999999995e-07) from EvaluableProblem
reduceX(self, x)
updateFine(self, xf, xc0, xc1)

 
class EvaluableProblem
            Prototype multidimensional optimization problem
 
E=(x0-1)^4+(x1-2)^4+(x2-3)^4+(x0-1)^2*(x1-2)^2*(x2-3)^2
 
The minimum is clearly at (1,2,3)
 
  
dHerr(self, x, h=9.9999999999999995e-07)
gerr(self, x, h=9.9999999999999995e-07)
getE(self, x)
getEg(self, x)
getEgDh(self, x)
getInitialX(self)
numDh(self, x, h=9.9999999999999995e-07)
numg(self, x, h=9.9999999999999995e-07)

 
class HarmonicProblem(EvaluableProblem)
            Larger 1D prototype problem, whose solution is a harmonic function
 
E=Int  f'^2+T f^2  dx
 
evaluated at x=0..1 on a N point grid. Fix f[0]=1
Use MirrorOnBounds boundary conditions
 
  
__init__(self, T=1.0, xshape=(10,))
dHerr(self, x, h=9.9999999999999995e-07) from EvaluableProblem
gerr(self, x, h=9.9999999999999995e-07) from EvaluableProblem
getE(self, xp)
getEg(self, xp)
getEgDh(self, xp)
getInitialX(self)
newx(self, xp)
numDh(self, x, h=9.9999999999999995e-07) from EvaluableProblem
numg(self, x, h=9.9999999999999995e-07) from EvaluableProblem

 
class MROptimizerState
            Encapsulates the state of one level of a multiresolution optimizer.
After instantiation, solveMR() is called to solve the problem.
Then, the final state of the problem can be queried using
getProblem(), or getOptimizer().
 
New parameters include: optimizerClass (defaults to OptimizerGdesQ)
and minlevel, minimum level at which we optimize (default is 0, optimize
at all levels). If xtollast is set, it replaces xtol for level 0.
maxlevel is the maximum level at which we descend.
 
  
__init__(self, problem, par=None, level=0)
 Accepts an instance of an EvaluableProblem problem,
implementing the Multigridable protocol (functions canReduce(),
getReduced(), and updateFine() )
canReduce(self)
getOptimizer(self)
getProblem(self)
getReduced(self)
 Returns a reduced version of itself, passing the current
optimization state along
smoothToConvergence(self)
 Optimizes until convergence is reached.
Returns a reference to self.
solveMR(self)
 Finds a solution to the problem using multiresolution approach.
Returns a reference to self.

 
class MultigridOptimizer
            A class representing a multigrid optimizer. Usage:
 
m=MultigridOptimizer(OptimizerMLdH,
                     inismooth=3,presmooth=1,
                     postsmooth=2,intsmooth=2,verbose=0,
                     abstol=1e-50,reltol=1e-5)
x=m.optimizeFMG(problem)
 
  
__init__(self, optimizer, presmooth=3, postsmooth=3, intsmooth=2, verbose=0, maxiter=100, abstol=1e-50, reltol=1.0000000000000001e-05)
makeTcycle(self, p, x, level=0, callbackE=None)
 Returns a value improved by a multigrid T cycle
Its main characteristics are not to iterate at fine level
when iteration at coarse level improved also the fine level
criterion.
makeVcycle(self, p, x, level=0, callbackE=None, maxlevel=1000, parentopt=None)
 Returns a value improved by a multigrid V cycle
makeWcycle(self, p, x, level=0, callbackE=None)
 Returns a value of x improved by a multigrid W cycle
optimizeFMG(self, p, level=0, maxiter=500, callbackE=None)
 Full multigrid optimizer of problem p. Returns the optimal value
x found.

 
class MultigridableProblem(HarmonicProblem, SamplingReducer)
            An extension of HarmonicProblem to show the multigrid protocol.
 
The following methods are added:
getReduced(x) which gets a reduced version of the problem and
              the parameters x
updateFine(xf,xc0,xc1)  which updates xf at fine level with the
                        difference xc1-xc0 at the coarse level
canReduce()   returns true if further reduction is possible
                        
The following implementation is only prototypical and will
probably have to be modified for a specific problem.
 
  
__init__(self, T=1.0, xshape=(10,)) from HarmonicProblem
canReduce(self)
dHerr(self, x, h=9.9999999999999995e-07) from EvaluableProblem
expand(self, x, targetlen=None) from SimpleReducer
expandexceptfirst(self, x, targetlen=None) from SimpleReducer
gerr(self, x, h=9.9999999999999995e-07) from EvaluableProblem
getE(self, xp) from HarmonicProblem
getEg(self, xp) from HarmonicProblem
getEgDh(self, xp) from HarmonicProblem
getInitialX(self) from HarmonicProblem
getReduced(self, x)
 returns (pc,xc), the coarsened problem and the coarsened x
newx(self, xp) from HarmonicProblem
numDh(self, x, h=9.9999999999999995e-07) from EvaluableProblem
numg(self, x, h=9.9999999999999995e-07) from EvaluableProblem
reduce(self, x) from SimpleReducer
reduceX(self, x)
reduceexceptfirst(self, x) from SimpleReducer
simpleexpand(self, x, targetlen=None) from SamplingReducer
simplereduce(self, x) from SamplingReducer
updateFine(self, xf, xc0, xc1)
 returns an updated array xf based on the coarse x before (xc0)
and after (xc1) coarse level smoothing

 
class OptimizerGdes
            Gradient descent optimizer for EvaluableProblem-like classes.
Just go down along the gradient, adapting the step size as needed.
Usage:
 
  o=OptimizerGdes(evaluableProblemInstance)
while not o.hasConverged():
      o.makeStep()
print o.getX()
 
optional parameters to the constructor include the starting point
'startx', tolerances 'abstol', 'reltol', verbosity flag 'verbose'.
 
Callback function 'cbf' can also be specified.
 
  
__init__(self, problem, startx=None, abstol=1e-50, reltol=9.9999999999999995e-07, verbose=0, xtol=9.9999999999999995e-07, startstep=1.0, minstep=1e-50, stepf=10.0, cbf=None)
getE(self)
getStep(self)
getX(self)
hasConverged(self)
hasConvergedE(self)
makeStep(self)
makeSuccessfulStep(self)
setStep(self, step)
setX(self, x)
showEnvironment(self, title=None, grad=None, step=None)

 
class OptimizerGdesP
            Gradient descent optimizer for EvaluableProblem-like classes.
Just go down along the gradient, adapting the step size as needed.
 
It works almost exactly as bigoptimize.OptimizerGdes, except that
parameters are given as a parameter object
Usage:
 
o=OptimizerGdesP(evaluableProblemInstance,parameters=None)
while not o.hasConverged():
      o.makeStep()
print o.getX()
 
optional parameters include the starting point
'startx', tolerances 'xtol', starting step 'startstep',
step mult. factors 'stepf' and 'stepdownf', verbosity flag 'verbose'.
 
Callback function 'cbf' can also be specified, which is then
called whenever the criterion is evaluated.
 
Some optimization has been performed to minimize the number
of evaluations needed.
 
  
__init__(self, problem, par=None)
getE(self)
getEg(self)
getIterCount(self)
getX(self)
hasConverged(self)
makeStep(self)
makeSuccessfulStep(self)
setX(self, x)
smoothToConvergence(self)
 Iterate until convergence is reached. By virtue of the
stopping criterion, the last evaluation is always the best

 
class OptimizerGdesQ(OptimizerGdesP)
            Adds automatic step-size calculation to OptimizerGdesP
 
  
__init__(self, problem, par=None) from OptimizerGdesP
getE(self) from OptimizerGdesP
getEg(self) from OptimizerGdesP
getIterCount(self) from OptimizerGdesP
getX(self) from OptimizerGdesP
hasConverged(self) from OptimizerGdesP
makeStep(self)
makeSuccessfulStep(self) from OptimizerGdesP
setX(self, x) from OptimizerGdesP
smoothToConvergence(self) from OptimizerGdesP

 
class OptimizerML
            Prototype optimizer for EvaluableProblem-like classes. It uses
Marquardt-Levenberg like method using the gradient but does not use the
Hessian. Usage:
 
o=OptimizerML(evaluableProblemInstance)
while not o.hasConverged():
      o.makeStep()
print o.getX()
 
optional parameters to the constructor include the starting point
'startx', tolerances 'abstol', 'reltol', verbosity flag 'verbose',
lambda multiplication factor 'lambdaf', maximum and minimum
values for lambda 'maxlambda' and 'minlambda'. Callback function 'cbf'
can also be specified.
 
  
__init__(self, problem, startx=None, abstol=1e-50, reltol=9.9999999999999995e-07, verbose=0, xtol=9.9999999999999995e-07, lambdaf=10.0, maxlambda=1e+100, minlambda=1e-50, cbf=None)
getE(self)
getX(self)
hasConverged(self)
hasConvergedE(self)
makeStep(self)
makeSuccessfulStep(self)
setX(self, x)

 
class OptimizerMLH
            Optimizer for EvaluableProblem-like classes that support the
'getEgH' method. Marquardt-Levenberg like method using the gradient
and the full Hessian approximated using first derivatives. Usage:
 
o=OptimizerMLH(evaluableProblemInstance)
while not o.hasConverged():
      o.makeStep()
print o.getX()
 
optional parameters to the constructor include the starting point
'startx', tolerances 'abstol', 'reltol', verbosity flag 'verbose',
lambda multiplication factor 'lambdaf', maximum and minimum
values for lambda 'maxlambda' and 'minlambda'. Callback function 'cbf'
can also be specified.
 
  
__init__(self, problem, startx=None, abstol=1e-50, reltol=9.9999999999999995e-07, xtol=9.9999999999999995e-07, verbose=0, lambdaf=10.0, maxlambda=1e+100, minlambda=1e-50, cbf=None)
hasConverged(self)
hasConvergedE(self)
makeStep(self)
makeSuccessfulStep(self)
setX(self, x)

 
class OptimizerMLdH
            Prototype optimizer for EvaluableProblem-like classes. It uses
Marquardt-Levenberg like method using the gradient and the diagonal of
the Hessian. Usage:
 
o=OptimizerMLdH(evaluableProblemInstance)
while not o.hasConverged():
      o.makeStep()
print o.getX()
 
optional parameters to the constructor include the starting point
'startx', tolerances 'abstol', 'reltol', verbosity flag 'verbose',
lambda multiplication factor 'lambdaf', maximum and minimum
values for lambda 'maxlambda' and 'minlambda'. Callback function 'cbf'
can also be specified.
 
  
__init__(self, problem, startx=None, abstol=1e-50, reltol=9.9999999999999995e-07, xtol=9.9999999999999995e-07, verbose=0, lambdaf=10.0, maxlambda=1e+100, minlambda=1e-50, cbf=None)
getE(self)
getX(self)
hasConverged(self)
hasConvergedE(self)
makeStep(self)
makeSuccessfulStep(self)
setX(self, x)

 
class QCorrectedMultigridableProblem(CorrectedMultigridableProblem)
            Quadratic extension to Bouman correction
The corrected problem is:
 
E'=E-betacorr*<r,x>+<r,x>^2
 
This assures that E' does not tend to -infty in any direction
and consequently the existence of the minimum.
 
  
__init__(self, p)
 p is a MultigridableProblem instance which we will encapsulate
canReduce(self) from CorrectedMultigridableProblem
dHerr(self, x, h=9.9999999999999995e-07) from EvaluableProblem
gerr(self, x, h=9.9999999999999995e-07) from EvaluableProblem
getE(self, x)
 Return corrected criterion
getEg(self, x)
 Return corrected criterion and gradient
getEgDh(self, x)
 Return corrected criterion and gradient
getInitialX(self) from CorrectedMultigridableProblem
getReduced(self, x)
 returns (pc,xc), the coarsened problem and the coarsened x
numDh(self, x, h=9.9999999999999995e-07) from EvaluableProblem
numg(self, x, h=9.9999999999999995e-07) from EvaluableProblem
reduceX(self, x) from CorrectedMultigridableProblem
updateFine(self, xf, xc0, xc1) from CorrectedMultigridableProblem

 
class SamplingReducer(SimpleReducer)
            The SamplingReducer class assembles the expand and reduce operations
based on sampling and linear interpolation. It works
for matrices with arbitrary dimensions.
 
  
expand(self, x, targetlen=None) from SimpleReducer
expandexceptfirst(self, x, targetlen=None) from SimpleReducer
reduce(self, x) from SimpleReducer
reduceexceptfirst(self, x) from SimpleReducer
simpleexpand(self, x, targetlen=None)
 expand 1D vector by 3pt averaging
simplereduce(self, x)
 reduce 1D vector by 3pt averaging

 
class SimpleReducer
            The SimpleReducer class assembles the expand and reduce operations
based on linear averaging and linear interpolation. It works
for matrices with arbitrary dimensions.
 
  
expand(self, x, targetlen=None)
 expand arbitrary vector x
expandexceptfirst(self, x, targetlen=None)
 expand x except the first dimension
reduce(self, x)
 reduce arbitrary vector/matrix x
reduceexceptfirst(self, x)
 reduce x except the first dimension
simpleexpand(self, x, targetlen=None)
 expand 1D vector by 3pt averaging
simplereduce(self, x)
 reduce 1D vector by 3pt averaging

 
class SplineReducer(SimpleReducer)
            
  
expand(self, x, targetlen=None)
expandexceptfirst(self, x, targetlen=None) from SimpleReducer
getsplinedegree(self)
reduce(self, x)
reduceexceptfirst(self, x) from SimpleReducer
simpleexpand(self, x, targetlen=None) from SimpleReducer
simplereduce(self, x) from SimpleReducer

 
class WCorrectedMultigridableProblem(CorrectedMultigridableProblem)
            Windowed extension to Bouman correction
 
 
The corrected problem is:
 
E'=E-<r,x>*W(<r,x>)
 
where W(z)=exp(-(z/sigma)^2) is a Gaussian shaped window
 
where the window size 'sigma^2=20 zm^2' is calculated from
 
zm=| I R x- x | * |r| /2
 
This assures that E' coincides with E except in a small neighborhood
of the original point.
 
  
__init__(self, p)
 p is a MultigridableProblem instance which we will encapsulate
canReduce(self) from CorrectedMultigridableProblem
dHerr(self, x, h=9.9999999999999995e-07) from EvaluableProblem
gerr(self, x, h=9.9999999999999995e-07) from EvaluableProblem
getE(self, x)
 Return corrected criterion
getEg(self, x)
 Return corrected criterion and gradient
getEgDh(self, x)
 Return corrected criterion, gradient and Hessian
getInitialX(self) from CorrectedMultigridableProblem
getReduced(self, x)
 returns (pc,xc), the coarsened problem and the coarsened x
getTau(self, x)
 Calculate the scalar product tau2=<r,x-x0>
getWindow(self, tau2)
 Calculate the window value
numDh(self, x, h=9.9999999999999995e-07) from EvaluableProblem
numg(self, x, h=9.9999999999999995e-07) from EvaluableProblem
reduceX(self, x) from CorrectedMultigridableProblem
updateFine(self, xf, xc0, xc1) from CorrectedMultigridableProblem

 
Functions
                      
alltrue = reduce(...)
array(...)
array(sequence, typecode=None, copy=1, savespace=0) will return a new array formed from the given (potentially nested) sequence with type given by typecode.  If no typecode is given, then the type will be determined as the minimum type required to hold the objects in sequence.  If copy is zero and sequence is already an array, a reference will be returned.  If savespace is nonzero, the new array will maintain its precision in operations.
choose(...)
choose(a, (b1,b2,...))
compare_optimizers(p=None)
cross_correlate(...)
cross_correlate(a,v)
cumproduct = accumulate(...)
cumsum = accumulate(...)
fromstring(...)
fromstring(string, count=None, typecode='l') returns a new 1d array initialized from the raw binary data in string.  If count is not equal to None, the new array will have count elements, otherwise it's size is determined by the size of string.
product = reduce(...)
repeat(...)
repeat(a, n, axis=0)
reshape(...)
reshape(a, (d1, d2, ..., dn)).  Change the shape of a to be an n-dimensional array with dimensions given by d1...dn.  One dimension is allowed to be None.  This dimension will be set to whatever value will make the size of the new array equal the size of the old one.  Note: the size specified for the new array must be exactly equal to the size of the  old one or an error will occur.  This returns a completely new array with the data of the old one copied.  Use a.shape=(...) for no data copying.
searchsorted = binarysearch(...)
binarysearch(a,v)
sometrue = reduce(...)
sum = reduce(...)
take(...)
take(a, indices, axis=0).  Selects the elements in indices from array a along the given axis.
testFMG(p=None)
testMR(p=None)
testVcycle(p=None)
testWcycle()
test_evproblem(p, x=None, h=9.9999999999999995e-07)
test_optimizerGdes(p=None, x=None, progress=0, solution=0)
test_optimizerML(p=None, x=None, progress=0, solution=0)
test_optimizerMLdH(p=None, x=None, progress=0, solution=0)
test_twoGrid()
twoGrid(p, verb=1, presmooth=2, smooth=30, twogrid=1)
 Test of the multigrid concept. Solve problem p on two grids
and return the solution.
zeros(...)
zeros((d1,...,dn),typecode,savespace) will return a new array of shape (d1,...,dn) and type typecode (default 'l') with all it's entries initialized to zero.  If savespace (default 0) is nonzero the array will be a spacesaver array.

 
Constants
                      ArrayType = <type 'array'>
Complex = 'D'
Complex0 = 'F'
Complex16 = 'F'
Complex32 = 'F'
Complex64 = 'D'
Complex8 = 'F'
Float = 'd'
Float0 = 'f'
Float16 = 'f'
Float32 = 'f'
Float64 = 'd'
Float8 = 'f'
Int = 'l'
Int0 = '1'
Int16 = 's'
Int32 = 'i'
Int8 = '1'
LittleEndian = 1
PrecisionError = 'PrecisionError'
PyObject = 'O'
UnsignedInt8 = 'b'
arraytype = <type 'array'>
e = 2.7182818284590451
pi = 3.1415926535897931