Commit aaec4da9 authored by Stéphane Adjemian

Removed remaining varargins in optimization-related routines.

parent 98f24413
function g = apprgrdn(x,f,fun,deltax,obj,varargin)
% g = apprgrdn(x,f,fun,deltax,obj,varargin)
function g = apprgrdn(x,f,fun,deltax,obj)
% Performs the finite-difference approximation of the gradient <g> at a
% point <x>; used in solveopt
%
......@@ -13,8 +13,7 @@ function g = apprgrdn(x,f,fun,deltax,obj,varargin)
%
% Modified by Giovanni Lombardo and Johannes Pfeifer to accommodate the
% Dynare structure
%
%
% Copyright (C) 1997-2008, Alexei Kuntsevich and Franz Kappel
% Copyright (C) 2008-2015 Giovanni Lombardo
% Copyright (C) 2015-2017 Dynare Team
......@@ -48,13 +47,13 @@ y=x;
g=NaN(n,1);
for i=1:n
y(i)=x(i)+di(i);
fi=feval(fun,y,varargin{:});
y(i) = x(i)+di(i);
fi = feval(fun, y);
if obj
if fi==f
for j=1:3
di(i)=di(i)*10; y(i)=x(i)+di(i);
fi=feval(fun,y,varargin{:});
fi = feval(fun, y);
if fi~=f
break
end
......@@ -65,7 +64,7 @@ for i=1:n
if obj
if ~isempty(idx) && any(idx==i)
y(i)=x(i)-di(i);
fi=feval(fun,y,varargin{:});
fi = feval(fun, y);
g(i)=.5*(g(i)+(f-fi)/di(i));
end
end
......
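A minimal sketch of the new one-argument calling convention for apprgrdn; the data and objective names below are hypothetical, and anything that previously travelled through varargin must now be captured in the function handle:

    mydata = randn(100,1);                       % hypothetical data bound via a closure
    fun = @(theta) sum((mydata - theta).^2);     % one-argument handle, as apprgrdn now expects
    x = 0.5;                                     % evaluation point
    f = fun(x);                                  % objective value at x
    deltax = 1e-6*ones(size(x));                 % finite-difference increments
    g = apprgrdn(x, f, fun, deltax, 1)           % gradient approximation (obj flag set)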
......@@ -2598,7 +2598,7 @@ if isempty(ORTHOGONALCOORSYSTEM_G) ...
end
f = felli(ORTHOGONALCOORSYSTEM_G{N}*x);
function f=frot(x, fun, varargin)
function f=frot(x, fun)
N = size(x,1);
global ORTHOGONALCOORSYSTEM_G
if isempty(ORTHOGONALCOORSYSTEM_G) ...
......@@ -2606,7 +2606,7 @@ if isempty(ORTHOGONALCOORSYSTEM_G) ...
|| isempty(ORTHOGONALCOORSYSTEM_G{N})
coordinatesystem(N);
end
f = feval(fun, ORTHOGONALCOORSYSTEM_G{N}*x, varargin{:});
f = feval(fun, ORTHOGONALCOORSYSTEM_G{N}*x);
function coordinatesystem(N)
if nargin < 1 || isempty(N)
......
function [fh,xh,gh,H,itct,fcount,retcodeh] = csminwel1(fcn,x0,H0,grad,crit,nit,method,epsilon,Verbose,Save_files,varargin)
%[fhat,xhat,ghat,Hhat,itct,fcount,retcodeh] = csminwel1(fcn,x0,H0,grad,crit,nit,method,epsilon,varargin)
function [fh,xh,gh,H,itct,fcount,retcodeh] = csminwel1(fcn, x0, H0, grad, crit, nit, method, epsilon, Verbose, Save_files)
% Inputs:
% fcn: [string] string naming the objective function to be minimized
% x0: [npar by 1] initial value of the parameter vector
......@@ -12,8 +12,6 @@ function [fh,xh,gh,H,itct,fcount,retcodeh] = csminwel1(fcn,x0,H0,grad,crit,nit,m
% nit: [scalar] Maximum number of iterations.
% method: [scalar] integer scalar for selecting gradient method: 2, 3 or 5 points formula.
% epsilon: [scalar] scalar double, numerical differentiation increment
% varargin: Optional additional inputs that get handed off to fcn each
% time it is called.
%
% Note that if the program ends abnormally, it is possible to retrieve the current x,
% f, and H from the files g1.mat and H.mat that are written at each iteration and at each
......@@ -40,7 +38,7 @@ function [fh,xh,gh,H,itct,fcount,retcodeh] = csminwel1(fcn,x0,H0,grad,crit,nit,m
%
% Original file downloaded from:
% http://sims.princeton.edu/yftp/optimize/mfiles/csminwel.m
%
% Copyright (C) 1993-2007 Christopher Sims
% Copyright (C) 2006-2017 Dynare Team
%
......@@ -123,7 +121,7 @@ while ~done
% disp_verbose([sprintf('x = ') sprintf('%15.8g %15.8g %15.8g %15.8g\n',x)]);
%-------------------------
itct=itct+1;
[f1, x1, fc, retcode1] = csminit1(fcn,x,penalty,f,g,badg,H,Verbose,varargin{:});
[f1, x1, fc, retcode1] = csminit1(fcn, x, penalty, f, g, badg, H, Verbose);
fcount = fcount+fc;
% erased on 8/4/94
% if (retcode == 1) || (abs(f1-f) < crit)
......@@ -148,7 +146,7 @@ while ~done
wall1=badg1;
% g1
if Save_files
save('g1.mat','g1','x1','f1','varargin');
save('g1.mat','g1','x1','f1');
end
end
if wall1 % && (~done) by Jinill
......@@ -158,7 +156,7 @@ while ~done
%fcliff=fh;xcliff=xh;
Hcliff=H+diag(diag(H).*rand(nx,1));
disp_verbose('Cliff. Perturbing search direction.',Verbose)
[f2, x2, fc, retcode2] = csminit1(fcn,x,penalty,f,g,badg,Hcliff,Verbose,varargin{:});
[f2, x2, fc, retcode2] = csminit1(fcn, x, penalty, f, g, badg, Hcliff, Verbose);
fcount = fcount+fc; % put by Jinill
if f2 < f
if retcode2==2 || retcode2==4
......@@ -178,7 +176,7 @@ while ~done
badg2
end
if Save_files
save('g2.mat','g2','x2','f2','varargin');
save('g2.mat','g2','x2','f2');
end
end
if wall2
......@@ -197,17 +195,17 @@ while ~done
badg3=1;
else
if NumGrad
[g3, badg3]=get_num_grad(method,fcn,penalty,f3,x3,epsilon,varargin{:});
[g3, badg3] = get_num_grad(method, fcn, penalty, f3, x3, epsilon);
elseif ischar(grad)
[g3, badg3] = grad(x3,varargin{:});
[g3, badg3] = grad(x3);
else
[~,cost_flag,g3] = penalty_objective_function(x1,fcn,penalty,varargin{:});
[~, cost_flag, g3] = penalty_objective_function(x1, fcn, penalty);
badg3 = ~cost_flag;
end
wall3=badg3;
% g3
if Save_files
save('g3.mat','g3','x3','f3','varargin');
save('g3.mat','g3','x3','f3');
end
end
end
......@@ -257,11 +255,11 @@ while ~done
end
if nogh
if NumGrad
[gh, badgh]=get_num_grad(method,fcn,penalty,fh,xh,epsilon,varargin{:});
[gh, badgh] = get_num_grad(method, fcn, penalty, fh, xh, epsilon);
elseif ischar(grad)
[gh, badgh] = grad(xh,varargin{:});
[gh, badgh] = grad(xh);
else
[~,cost_flag,gh] = penalty_objective_function(x1,fcn,penalty,varargin{:});
[~, cost_flag, gh] = penalty_objective_function(x1, fcn, penalty);
badgh = ~cost_flag;
end
end
......
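Since csminwel1 no longer forwards varargin to fcn, callers bind any extra inputs into the objective handle themselves. A hedged sketch with a hypothetical quadratic objective and illustrative option values (passing [] for grad selects the numerical gradient):

    target = [1; -2];                            % hypothetical data captured by the closure
    fcn = @(x) sum((x - target).^2);             % objective now takes the parameter vector only
    x0 = zeros(2,1);
    H0 = 1e-4*eye(2);                            % initial inverse-Hessian guess
    crit = 1e-7; nit = 100; method = 2; epsilon = 1e-6;
    [fh, xh] = csminwel1(fcn, x0, H0, [], crit, nit, method, epsilon, false, false)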
function [PostMode, HessianMatrix, Scale, ModeValue] = gmhmaxlik(fun, xinit, Hinit, iscale, bounds, priorstd, gmhmaxlikOptions, OptimizationOptions, varargin)
function [PostMode, HessianMatrix, Scale, ModeValue] = gmhmaxlik(fun, xinit, Hinit, iscale, bounds, priorstd, gmhmaxlikOptions, OptimizationOptions)
% Copyright (C) 2006-2017 Dynare Team
% Copyright (C) 2006-2019 Dynare Team
%
% This file is part of Dynare.
%
......@@ -62,7 +62,7 @@ if ~isempty(OptimizationOptions)
end
% Evaluate the objective function.
OldModeValue = feval(fun,xinit,varargin{:});
OldModeValue = feval(fun, xinit);
if ~exist('MeanPar','var')
MeanPar = xinit;
......@@ -97,8 +97,8 @@ for i=1:gmhmaxlikOptions.iterations
else
flag = 'LastCall';
end
[PostMode, PostVariance, Scale, PostMean] = gmhmaxlik_core(fun, OldPostMode, bounds, gmhmaxlikOptions, Scale, flag, MeanPar, OldPostVariance, varargin{:});
ModeValue = feval(fun, PostMode, varargin{:});
[PostMode, PostVariance, Scale, PostMean] = gmhmaxlik_core(fun, OldPostMode, bounds, gmhmaxlikOptions, Scale, flag, MeanPar, OldPostVariance);
ModeValue = feval(fun, PostMode);
dVariance = max(max(abs(PostVariance-OldPostVariance)));
dMean = max(abs(PostMean-OldPostMean));
skipline()
......
function [PostMod,PostVar,Scale,PostMean] = gmhmaxlik_core(ObjFun,xparam1,mh_bounds,options,iScale,info,MeanPar,VarCov,varargin)
function [PostMod, PostVar, Scale, PostMean] = gmhmaxlik_core(ObjFun, xparam1, mh_bounds, options, iScale, info, MeanPar, VarCov)
% (Dirty) Global minimization routine of (minus) a likelihood (or posterior density) function.
%
% INPUTS
% o ObjFun [char] string specifying the name of the objective function.
% o xparam1 [double] (p*1) vector of parameters to be estimated.
% o mh_bounds [double] (p*2) matrix defining lower and upper bounds for the parameters.
% o options [structure] options for the optimization algorithm (options_.gmhmaxlik).
% o iScale [double] scalar specifying the initial of the jumping distribution's scale parameter.
% o info [char] string, empty or equal to 'LastCall'.
% o MeanPar [double] (p*1) vector specifying the initial posterior mean.
% o VarCov [double] (p*p) matrix specifying the initial posterior covariance matrix.
% o gend [integer] scalar specifying the number of observations ==> varargin{1}.
% o data [double] (T*n) matrix of data ==> varargin{2}.
% - ObjFun [handle] function handle for the objective function.
% - xparam1 [double] p×1 vector, parameters to be estimated.
% - mh_bounds [double] p×2 matrix, lower and upper bounds for the parameters.
% - options [struct] options for the optimization algorithm (options_.gmhmaxlik).
% - iScale [double] scalar, the initial value of the jumping distribution's scale parameter.
% - info [char] empty or equal to 'LastCall'.
% - MeanPar [double] p×1 vector, the initial posterior mean.
% - VarCov [double] p×p matrix, the initial posterior covariance matrix.
%
% OUTPUTS
% o PostMod [double] (p*1) vector, evaluation of the posterior mode.
% o PostVar [double] (p*p) matrix, evaluation of the posterior covariance matrix.
% o Scale [double] scalar specifying the scale parameter that should be used in
% an eventual metropolis-hastings algorithm.
% o PostMean [double] (p*1) vector, evaluation of the posterior mean.
% - PostMod [double] p×1 vector, estimate of the posterior mode.
% - PostVar [double] p×p matrix, estimate of the posterior covariance matrix.
% - Scale [double] scalar, the scale parameter that should be used in an eventual Metropolis-Hastings algorithm.
% - PostMean [double] p×1 vector, estimate of the posterior mean.
%
% ALGORITHM
% Metropolis-Hastings with a constantly updated covariance matrix for
% the jump distribution. The posterior mean, variance and mode are
% the jumping distribution. The posterior mean, variance and mode are
% updated (in step 2) with the following rules:
%
% \[
% \mu_t = \mu_{t-1} + \frac{1}{t}\left(\theta_t-\mu_{t-1}\right)
% \]
%
% \[
% \Sigma_t = \Sigma_{t-1} + \mu_{t-1}\mu_{t-1}'-\mu_{t}\mu_{t}' +
% \frac{1}{t}\left(\theta_t\theta_t'-\Sigma_{t-1}-\mu_{t-1}\mu_{t-1}'\right)
% \]
% 1
% μₜ = μₜ₋₁ + ─ (θₜ - μₜ₋₁)
% t
% 1
% Σₜ = Σₜ₋₁ + μₜ₋₁μₜ₋₁' - μₜμₜ' + ─ ( θₜθₜ' - Σₜ₋₁ - μₜ₋₁μₜ₋₁' )
% t
%
% and
%          ⎧ θₜ        if p(θₜ|Y) > p(modeₜ₋₁|Y)
%          ⎪
% modeₜ =  ⎨
%          ⎪ modeₜ₋₁   otherwise.
%          ⎩
%
% \[
% \mathrm{mode}_t = \left\{
% \begin{array}{ll}
% \theta_t, & \hbox{if } p(\theta_t|\mathcal Y) > p(\mathrm{mode}_{t-1}|\mathcal Y) \\
% \mathrm{mode}_{t-1}, & \hbox{otherwise.}
% \end{array}
% \right.
% \]
%
% where $t$ is the iteration, $\mu_t$ the estimate of the posterior mean
% after $t$ iterations, $\Sigma_t$ the estimate of the posterior
% covariance matrix after $t$ iterations, $\mathrm{mode}_t$ is the
% evaluation of the posterior mode after $t$ iterations and
% $p(\theta_t|\mathcal Y)$ is the posterior density of parameters
% where t is the iteration, μₜ the estimate of the posterior mean
% after t iterations, Σₜ the estimate of the posterior
% covariance matrix after t iterations, modeₜ is the
% evaluation of the posterior mode after t iterations and
% p(θₜ|Y) is the posterior density (kernel) of parameters
% (specified by the user supplied function "fun").
%
% SPECIAL REQUIREMENTS
% None.
% Copyright (C) 2006-2017 Dynare Team
% Copyright (C) 2006-2019 Dynare Team
%
% This file is part of Dynare.
%
......@@ -92,7 +80,7 @@ set(hh,'Name','Tuning of the scale parameter.');
j = 1; jj = 1;
isux = 0; jsux = 0; test = 0;
ix2 = ModePar;% initial condition!
ilogpo2 = - feval(ObjFun,ix2,varargin{:});% initial posterior density
ilogpo2 = - feval(ObjFun, ix2);% initial posterior density
mlogpo2 = ilogpo2;
try
dd = transpose(chol(CovJump));
......@@ -102,7 +90,7 @@ end
while j<=MaxNumberOfTuningSimulations
proposal = iScale*dd*randn(npar,1) + ix2;
if all(proposal > mh_bounds(:,1)) && all(proposal < mh_bounds(:,2))
logpo2 = - feval(ObjFun,proposal,varargin{:});
logpo2 = - feval(ObjFun, proposal);
else
logpo2 = -inf;
end
......@@ -147,11 +135,11 @@ hh = dyn_waitbar(0,'Metropolis-Hastings...');
set(hh,'Name','Estimation of the posterior covariance...'),
j = 1;
isux = 0;
ilogpo2 = - feval(ObjFun,ix2,varargin{:});
ilogpo2 = - feval(ObjFun, ix2);
while j<= NumberOfIterations
proposal = iScale*dd*randn(npar,1) + ix2;
if all(proposal > mh_bounds(:,1)) && all(proposal < mh_bounds(:,2))
logpo2 = - feval(ObjFun,proposal,varargin{:});
logpo2 = - feval(ObjFun, proposal);
else
logpo2 = -inf;
end
......@@ -190,12 +178,12 @@ if strcmpi(info,'LastCall')
j = 1; jj = 1;
isux = 0; jsux = 0;
test = 0;
ilogpo2 = - feval(ObjFun,ix2,varargin{:});% initial posterior density
ilogpo2 = - feval(ObjFun, ix2);% initial posterior density
dd = transpose(chol(CovJump));
while j<=MaxNumberOfTuningSimulations
proposal = iScale*dd*randn(npar,1) + ix2;
if all(proposal > mh_bounds(:,1)) && all(proposal < mh_bounds(:,2))
logpo2 = - feval(ObjFun,proposal,varargin{:});
logpo2 = - feval(ObjFun, proposal);
else
logpo2 = -inf;
end
......@@ -243,7 +231,7 @@ if strcmpi(info,'LastCall')
while j<=MaxNumberOfClimbingSimulations
proposal = iScale*dd*randn(npar,1) + ModePar;
if all(proposal > mh_bounds(:,1)) && all(proposal < mh_bounds(:,2))
logpo2 = - feval(ObjFun,proposal,varargin{:});
logpo2 = - feval(ObjFun, proposal);
else
logpo2 = -inf;
end
......
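The recursive updates documented in the header can be exercised in isolation. A scalar sketch with a hypothetical log posterior kernel (gmhmaxlik_core itself interleaves these updates with the Metropolis-Hastings accept/reject step):

    logpost = @(theta) -0.5*theta.^2;            % hypothetical log posterior kernel
    mu = 0; Sigma = 1; postmode = 0;             % initial estimates
    for t = 1:1000
        theta = mu + sqrt(Sigma)*randn;          % stand-in for an accepted draw
        mu_new = mu + (theta - mu)/t;            % mean update
        Sigma = Sigma + mu*mu' - mu_new*mu_new' ...
                + (theta*theta' - Sigma - mu*mu')/t;   % covariance update
        mu = mu_new;
        if logpost(theta) > logpost(postmode)
            postmode = theta;                    % keep the best draw as the mode estimate
        end
    end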
function [f0, x, ig] = mr_gstep(h1, x, bounds, func0, penalty, htol0, Verbose, Save_files, gepsilon, pnames)
% Gibbs type step in optimisation
%
% varargin{1} --> DynareDataset
% varargin{2} --> DatasetInfo
% varargin{3} --> DynareOptions
% varargin{4} --> Model
% varargin{5} --> EstimatedParameters
% varargin{6} --> BayesInfo
% varargin{7} --> DynareResults
% Copyright © 2006-2019 Dynare Team
%
......
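Illustrative only: a "Gibbs type step" improves the objective one parameter at a time, holding the others fixed. A minimal sketch with a hypothetical objective and step sizes (the actual routine performs a univariate search per coordinate):

    fhandle = @(z) sum(z.^2);                    % hypothetical one-argument objective
    x = [1; -2];                                 % current parameter vector
    h1 = 0.1*ones(size(x));                      % per-parameter step sizes
    f0 = fhandle(x);
    for i = 1:length(x)
        xtrial = x;
        xtrial(i) = x(i) + h1(i);                % move only the i-th coordinate
        ftrial = fhandle(xtrial);
        if ftrial < f0                           % keep the move if it lowers the objective
            x = xtrial;
            f0 = ftrial;
        end
    end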
function [hessian_mat, gg, htol1, ihh, hh_mat0, hh1, hess_info] = mr_hessian(x, func, penalty, hflag, htol0, hess_info, bounds, priorstd)
% function [hessian_mat, gg, htol1, ihh, hh_mat0, hh1, hess_info] = mr_hessian(x,func,penalty,hflag,htol0,hess_info,varargin)
% numerical gradient and Hessian, with 'automatic' check of numerical
% error
%
% Adapted from Michel Juillard's original routine hessian.m.
% Computes the Hessian matrix.
%
% INPUTS
% - func [handle] function handle. The function must return two outputs: the log-likelihood AND the individual contributions to the log-likelihood at times t=1,...,T, used to compute the outer product gradient estimate
......
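A sketch of the two-output contract described above (all names hypothetical): the objective returns the total value together with the per-period contributions, whose numerically differenced gradients gt (T×p) yield the outer-product estimate gt'*gt of the Hessian:

    function [fval, llik] = my_objective(theta, data)
    % Hypothetical two-output objective: total minus log-likelihood and
    % per-period contributions llik(t), t=1,...,T (Gaussian stand-in).
    llik = 0.5*(data - theta).^2;
    fval = sum(llik);
    end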
function [xparam1, hh, gg, fval, igg, hess_info] = newrat(func0, x, bounds, analytic_derivation, ftol0, nit, flagg, Verbose, Save_files, hess_info, priorstd, gepsilon, pnames)
% [xparam1, hh, gg, fval, igg, hess_info] = newrat(func0, x, bounds, analytic_derivation, ftol0, nit, flagg, Verbose, Save_files, hess_info, varargin)
%
% Optimiser with outer product gradient and sequences of univariate steps;
% uses Chris Sims' subroutine for line search
%
......
......@@ -37,12 +37,12 @@ for i=1:n
xiold = x(i);
h = step_length_correction(xiold,scale,i)*delta;
x(i) = xiold + h;
[f1,~,cost_flag1] = penalty_objective_function(x, fcn, penalty, varargin);
[f1,~,cost_flag1] = penalty_objective_function(x, fcn, penalty);
if ~cost_flag1
fprintf('Gradient w.r.t. parameter number %3d (x=%16.8f,+h=%16.8f,f0=%16.8f,f1=%16.8f,f2=%16.8f,g0=%16.8f): penalty on the right!\n',i,xiold,h,f0,f1,f2,(f1 - f2) / (2*h))
end
x(i) = xiold - h;
[f2,~,cost_flag2] = penalty_objective_function(x, fcn, penalty, varargin);
[f2,~,cost_flag2] = penalty_objective_function(x, fcn, penalty);
if ~cost_flag2
fprintf('Gradient w.r.t. parameter number %3d (x=%16.8f,+h=%16.8f,f0=%16.8f,f1=%16.8f,f2=%16.8f,g0=%16.8f): penalty on the left!\n',i,xiold,h,f0,f1,f2,(f1 - f2) / (2*h))
end
......
function [xopt, fopt,exitflag, n_accepted_draws, n_total_draws, n_out_of_bounds_draws, t, vm] = ...
simulated_annealing(fcn,x,optim,lb,ub,varargin)
simulated_annealing(fcn, x, optim, lb, ub)
% function [xopt, fopt,exitflag, n_accepted_draws, n_total_draws, n_out_of_bounds_draws, t, vm] = ...
% simulated_annealing(fcn,x,optim,lb,ub,varargin)
% simulated_annealing(fcn,x,optim,lb,ub)
%
% Implements the continuous simulated annealing global optimization
% algorithm described in Corana et al. (1987)
......@@ -206,7 +206,7 @@ if(sum(x>ub)+sum(x<lb)>0)
return
end
%* Evaluate the function with input x and return value as f. *
f=feval(fcn,x,varargin{:});
f=feval(fcn, x);
%*
% If the function is to be minimized, switch the sign of the function.
% Note that all intermediate and final output switches the sign back
......@@ -266,7 +266,7 @@ while (1>0)
end
%* Evaluate the function with the trial point xp and return as fp. *
% fp=feval(fcn,xp,listarg);
fp=feval(fcn,xp,varargin{:});
fp=feval(fcn, xp);
if(optim.maximizer_indicator==0)
fp=-fp;
end
......
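The accept/reject decision applied to fp above follows the standard Metropolis criterion (in its maximization form). A self-contained sketch with hypothetical values, mirroring the routine's variable names:

    f = -1.0; fp = -1.2; t = 0.5;                % current value, trial value, temperature
    x = 0; xp = 0.1;                             % current and trial points
    if fp >= f || rand() < exp((fp - f)/t)
        x = xp; f = fp;                          % accept the trial point
    end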
function [x,f,exitflag,n_f_evals,n_grad_evals,n_constraint_evals,n_constraint_gradient_evals]=solvopt(x,fun,grad,func,gradc,optim,varargin)
% [x,f,options]=solvopt(x,fun,grad,func,gradc,options,varargin)
function [x,f,exitflag,n_f_evals,n_grad_evals,n_constraint_evals,n_constraint_gradient_evals]=solvopt(x,fun,grad,func,gradc,optim)
% [x,f,options]=solvopt(x,fun,grad,func,gradc,options)
%
% The function SOLVOPT, developed by Alexei Kuntsevich and Franz Kappel,
% performs a modified version of Shor's r-algorithm in
......@@ -271,9 +271,9 @@ stopf=0;
% COMPUTE THE FUNCTION ( FIRST TIME ) ----{
if trx
f=feval(fun,x',varargin{:});
f = feval(fun, x');
else
f=feval(fun,x,varargin{:});
f = feval(fun, x);
end
n_f_evals=n_f_evals+1;
if isempty(f)
......@@ -365,23 +365,23 @@ if app
deltax=h1*ddx*ones(size(x));
if constr
if trx
g=apprgrdn(x',fp,fun,deltax',1,varargin{:});
g = apprgrdn(x', fp, fun, deltax', 1);
else
g=apprgrdn(x ,fp,fun,deltax,1,varargin{:});
g = apprgrdn(x, fp, fun, deltax, 1);
end
else
if trx
g=apprgrdn(x',f,fun,deltax',1,varargin{:});
g = apprgrdn(x', f, fun, deltax', 1);
else
g=apprgrdn(x ,f,fun,deltax,1,varargin{:});
g = apprgrdn(x, f, fun, deltax, 1);
end
end
n_f_evals=n_f_evals+n;
n_f_evals = n_f_evals+n;
else
if trx
g=feval(grad,x',varargin{:});
g = feval(grad, x');
else
g=feval(grad,x,varargin{:});
g = feval(grad, x);
end
n_grad_evals=n_grad_evals+1;
end
......@@ -601,9 +601,9 @@ while 1
x=x+hp*g0;
% FUNCTION VALUE
if trx
f=feval(fun,x',varargin{:});
f = feval(fun, x');
else
f=feval(fun,x,varargin{:});
f = feval(fun, x);
end
n_f_evals=n_f_evals+1;
if h1*f==Inf
......@@ -773,28 +773,28 @@ while 1
deltax(idx)=ones(size(idx)); deltax=h1*ddx*deltax;
if constr
if trx
g=apprgrdn(x',fp,fun,deltax',1,varargin{:});
g = apprgrdn(x', fp, fun, deltax', 1);
else
g=apprgrdn(x ,fp,fun,deltax,1,varargin{:});
g = apprgrdn(x, fp, fun, deltax, 1);
end
else
if trx
g=apprgrdn(x',f,fun,deltax',1,varargin{:});
g = apprgrdn(x', f, fun, deltax', 1);
else
g=apprgrdn(x ,f,fun,deltax ,1,varargin{:});
g = apprgrdn(x, f, fun, deltax, 1);
end
end
n_f_evals=n_f_evals+n;
else
if trx
g=feval(grad,x',varargin{:});
g = feval(grad, x');
else
g=feval(grad,x,varargin{:});
g = feval(grad, x);
end
n_grad_evals=n_grad_evals+1;
n_grad_evals = n_grad_evals+1;
end
if size(g,2)==1
g=g'
g=g';
end
ng=norm(g);
if isnan(ng)
......@@ -1066,9 +1066,9 @@ while 1
for i=1:10
x=x+g0;
if trx
f=feval(fun,x',varargin{:});
f = feval(fun, x');
else
f=feval(fun,x,varargin{:});
f = feval(fun, x);
end
n_f_evals=n_f_evals+1;
if abs(f)==Inf
......@@ -1098,16 +1098,16 @@ while 1
deltax(idx)=ones(size(idx));
deltax=h1*ddx*deltax;
if trx
g=apprgrdn(x',f,fun,deltax',1,varargin{:});
g = apprgrdn(x', f, fun, deltax', 1);
else
g=apprgrdn(x,f,fun,deltax,1,varargin{:});
g = apprgrdn(x, f, fun, deltax, 1);
end
n_f_evals=n_f_evals+n;
else
if trx
g=feval(grad,x',varargin{:});
g = feval(grad, x');
else
g=feval(grad,x,varargin{:});
g = feval(grad, x);
end
n_grad_evals=n_grad_evals+1;
end
......@@ -1180,9 +1180,9 @@ while 1
for i=1:20
x1(j)=x1(j)/1.15;
if trx
f1=feval(fun,x1',varargin{:});
f1 = feval(fun, x1');
else
f1=feval(fun,x1,varargin{:});
f1 = feval(fun, x1);
end
n_f_evals=n_f_evals+1;
if abs(f1)~=Inf && ~isnan(f1)
......@@ -1203,16 +1203,16 @@ while 1
if app
deltax=h1*ddx*ones(size(deltax));
if trx
gt=apprgrdn(x1',fm,fun,deltax',1,varargin{:});
gt = apprgrdn(x1', fm, fun, deltax', 1);
else
gt=apprgrdn(x1 ,fm,fun,deltax ,1,varargin{:});
gt = apprgrdn(x1, fm, fun, deltax, 1);
end
n_f_evals=n_f_evals+n;
else
if trx
gt=feval(grad,x1',varargin{:});
gt = feval(grad, x1');
else
gt=feval(grad,x1,varargin{:});
gt = feval(grad, x1);
end
n_grad_evals=n_grad_evals+1;
end
......
function [opt_par_values,fval,exitflag]=optimizer_function_wrapper(objective_function_handle,start_par_value,varargin)
% function [opt_par_values,fval,exitflag]=optimizer_function_wrapper(objective_function_handle,start_par_value,varargin)
function [opt_par_values, fval, exitflag] = optimizer_function_wrapper(objective_function_handle, start_par_value)
% Demonstrates how to invoke an external optimizer for mode_computation
%set options of optimizer
......@@ -12,6 +12,6 @@ analytic_grad=[];
Verbose=1;
Save_files=1;
%call optimizer
[fval,opt_par_values,grad,hessian_mat,itct,fcount,exitflag] = ...
csminwel1(objective_function_handle, start_par_value, H0, analytic_grad, crit, nit, numgrad, epsilon, Verbose,Save_files, varargin{:});
[fval, opt_par_values, ~, ~, ~, ~, exitflag] = ...
csminwel1(objective_function_handle, start_par_value, H0, analytic_grad, crit, nit, numgrad, epsilon, Verbose, Save_files);
end
\ No newline at end of file
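With varargin gone from the wrapper as well, a caller would bind estimation inputs into the handle first. A hypothetical usage sketch (minus_log_posterior, dataset_ and options_ are placeholders):

    objective_function_handle = @(xparam) minus_log_posterior(xparam, dataset_, options_);
    [opt_par_values, fval, exitflag] = optimizer_function_wrapper(objective_function_handle, start_par_value)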