Diffstat (limited to 'src/main/java/org/apache/commons/math3/optimization/general')
-rw-r--r--  src/main/java/org/apache/commons/math3/optimization/general/AbstractDifferentiableOptimizer.java        90
-rw-r--r--  src/main/java/org/apache/commons/math3/optimization/general/AbstractLeastSquaresOptimizer.java         577
-rw-r--r--  src/main/java/org/apache/commons/math3/optimization/general/AbstractScalarDifferentiableOptimizer.java 114
-rw-r--r--  src/main/java/org/apache/commons/math3/optimization/general/ConjugateGradientFormula.java               50
-rw-r--r--  src/main/java/org/apache/commons/math3/optimization/general/GaussNewtonOptimizer.java                  194
-rw-r--r--  src/main/java/org/apache/commons/math3/optimization/general/LevenbergMarquardtOptimizer.java           943
-rw-r--r--  src/main/java/org/apache/commons/math3/optimization/general/NonLinearConjugateGradientOptimizer.java   311
-rw-r--r--  src/main/java/org/apache/commons/math3/optimization/general/Preconditioner.java                         46
-rw-r--r--  src/main/java/org/apache/commons/math3/optimization/general/package-info.java                           22
9 files changed, 2347 insertions, 0 deletions
diff --git a/src/main/java/org/apache/commons/math3/optimization/general/AbstractDifferentiableOptimizer.java b/src/main/java/org/apache/commons/math3/optimization/general/AbstractDifferentiableOptimizer.java
new file mode 100644
index 0000000..d175863
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optimization/general/AbstractDifferentiableOptimizer.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optimization.general;
+
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.analysis.differentiation.GradientFunction;
+import org.apache.commons.math3.analysis.differentiation.MultivariateDifferentiableFunction;
+import org.apache.commons.math3.optimization.ConvergenceChecker;
+import org.apache.commons.math3.optimization.GoalType;
+import org.apache.commons.math3.optimization.OptimizationData;
+import org.apache.commons.math3.optimization.InitialGuess;
+import org.apache.commons.math3.optimization.PointValuePair;
+import org.apache.commons.math3.optimization.direct.BaseAbstractMultivariateOptimizer;
+
+/**
+ * Base class for implementing optimizers for multivariate scalar
+ * differentiable functions.
+ * It contains boiler-plate code for dealing with gradient evaluation.
+ *
+ * @deprecated As of 3.1 (to be removed in 4.0).
+ * @since 3.1
+ */
+@Deprecated
+public abstract class AbstractDifferentiableOptimizer
+ extends BaseAbstractMultivariateOptimizer<MultivariateDifferentiableFunction> {
+ /**
+ * Objective function gradient.
+ */
+ private MultivariateVectorFunction gradient;
+
+ /**
+ * @param checker Convergence checker.
+ */
+ protected AbstractDifferentiableOptimizer(ConvergenceChecker<PointValuePair> checker) {
+ super(checker);
+ }
+
+ /**
+ * Compute the gradient vector.
+ *
+ * @param evaluationPoint Point at which the gradient must be evaluated.
+ * @return the gradient at the specified point.
+ */
+ protected double[] computeObjectiveGradient(final double[] evaluationPoint) {
+ return gradient.value(evaluationPoint);
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @deprecated In 3.1. Please use
+ * {@link #optimizeInternal(int,MultivariateDifferentiableFunction,GoalType,OptimizationData[])}
+ * instead.
+ */
+ @Override @Deprecated
+ protected PointValuePair optimizeInternal(final int maxEval,
+ final MultivariateDifferentiableFunction f,
+ final GoalType goalType,
+ final double[] startPoint) {
+ return optimizeInternal(maxEval, f, goalType, new InitialGuess(startPoint));
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ protected PointValuePair optimizeInternal(final int maxEval,
+ final MultivariateDifferentiableFunction f,
+ final GoalType goalType,
+ final OptimizationData... optData) {
+ // Store optimization problem characteristics.
+ gradient = new GradientFunction(f);
+
+ // Perform optimization.
+ return super.optimizeInternal(maxEval, f, goalType, optData);
+ }
+}
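
The gradient field stored by optimizeInternal above is a GradientFunction wrapper, so computeObjectiveGradient simply delegates to it. Below is a minimal sketch of that mechanism, assuming an illustrative quadratic objective; the class name, objective function and sample point are assumptions for demonstration, not part of this changeset.

import org.apache.commons.math3.analysis.MultivariateVectorFunction;
import org.apache.commons.math3.analysis.differentiation.DerivativeStructure;
import org.apache.commons.math3.analysis.differentiation.GradientFunction;
import org.apache.commons.math3.analysis.differentiation.MultivariateDifferentiableFunction;

public class GradientSketch {
    public static void main(String[] args) {
        // Illustrative objective: f(x, y) = x^2 + y^2.
        MultivariateDifferentiableFunction f = new MultivariateDifferentiableFunction() {
            public double value(double[] x) {
                return x[0] * x[0] + x[1] * x[1];
            }
            public DerivativeStructure value(DerivativeStructure[] x) {
                return x[0].multiply(x[0]).add(x[1].multiply(x[1]));
            }
        };
        // This mirrors what optimizeInternal stores in the "gradient" field.
        MultivariateVectorFunction gradient = new GradientFunction(f);
        double[] g = gradient.value(new double[] { 1.0, 2.0 });
        System.out.println(g[0] + ", " + g[1]); // expected: 2.0, 4.0
    }
}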
diff --git a/src/main/java/org/apache/commons/math3/optimization/general/AbstractLeastSquaresOptimizer.java b/src/main/java/org/apache/commons/math3/optimization/general/AbstractLeastSquaresOptimizer.java
new file mode 100644
index 0000000..96f7fb2
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optimization/general/AbstractLeastSquaresOptimizer.java
@@ -0,0 +1,577 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optimization.general;
+
+import org.apache.commons.math3.analysis.DifferentiableMultivariateVectorFunction;
+import org.apache.commons.math3.analysis.FunctionUtils;
+import org.apache.commons.math3.analysis.differentiation.DerivativeStructure;
+import org.apache.commons.math3.analysis.differentiation.MultivariateDifferentiableVectorFunction;
+import org.apache.commons.math3.exception.DimensionMismatchException;
+import org.apache.commons.math3.exception.NumberIsTooSmallException;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+import org.apache.commons.math3.linear.ArrayRealVector;
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.linear.DiagonalMatrix;
+import org.apache.commons.math3.linear.DecompositionSolver;
+import org.apache.commons.math3.linear.MatrixUtils;
+import org.apache.commons.math3.linear.QRDecomposition;
+import org.apache.commons.math3.linear.EigenDecomposition;
+import org.apache.commons.math3.optimization.OptimizationData;
+import org.apache.commons.math3.optimization.InitialGuess;
+import org.apache.commons.math3.optimization.Target;
+import org.apache.commons.math3.optimization.Weight;
+import org.apache.commons.math3.optimization.ConvergenceChecker;
+import org.apache.commons.math3.optimization.DifferentiableMultivariateVectorOptimizer;
+import org.apache.commons.math3.optimization.PointVectorValuePair;
+import org.apache.commons.math3.optimization.direct.BaseAbstractMultivariateVectorOptimizer;
+import org.apache.commons.math3.util.FastMath;
+
+/**
+ * Base class for implementing least squares optimizers.
+ * It handles the boilerplate methods associated with threshold settings,
+ * Jacobian and error estimation.
+ * <br/>
+ * This class constructs the Jacobian matrix of the function argument in method
+ * {@link BaseAbstractMultivariateVectorOptimizer#optimize(int,
+ * org.apache.commons.math3.analysis.MultivariateVectorFunction,OptimizationData[])
+ * optimize} and assumes that the rows of that matrix correspond to the model
+ * functions while the columns correspond to the parameters; thus, the number
+ * of rows is equal to the dimension of the
+ * {@link org.apache.commons.math3.optimization.Target Target} while
+ * the number of columns is equal to the dimension of the
+ * {@link org.apache.commons.math3.optimization.InitialGuess InitialGuess}.
+ *
+ * @deprecated As of 3.1 (to be removed in 4.0).
+ * @since 1.2
+ */
+@Deprecated
+public abstract class AbstractLeastSquaresOptimizer
+ extends BaseAbstractMultivariateVectorOptimizer<DifferentiableMultivariateVectorFunction>
+ implements DifferentiableMultivariateVectorOptimizer {
+ /**
+ * Singularity threshold (cf. {@link #getCovariances(double)}).
+ * @deprecated As of 3.1.
+ */
+ @Deprecated
+ private static final double DEFAULT_SINGULARITY_THRESHOLD = 1e-14;
+ /**
+ * Jacobian matrix of the weighted residuals.
+ * This matrix is in canonical form just after the calls to
+ * {@link #updateJacobian()}, but may be modified by the solver
+ * in the derived class (the {@link LevenbergMarquardtOptimizer
+ * Levenberg-Marquardt optimizer} does this).
+ * @deprecated As of 3.1. To be removed in 4.0. Please use
+ * {@link #computeWeightedJacobian(double[])} instead.
+ */
+ @Deprecated
+ protected double[][] weightedResidualJacobian;
+ /** Number of columns of the jacobian matrix.
+ * @deprecated As of 3.1.
+ */
+ @Deprecated
+ protected int cols;
+ /** Number of rows of the jacobian matrix.
+ * @deprecated As of 3.1.
+ */
+ @Deprecated
+ protected int rows;
+ /** Current point.
+ * @deprecated As of 3.1.
+ */
+ @Deprecated
+ protected double[] point;
+ /** Current objective function value.
+ * @deprecated As of 3.1.
+ */
+ @Deprecated
+ protected double[] objective;
+ /** Weighted residuals.
+ * @deprecated As of 3.1.
+ */
+ @Deprecated
+ protected double[] weightedResiduals;
+ /** Cost value (square root of the weighted sum of squared residuals).
+ * @deprecated As of 3.1. Field to become "private" in 4.0.
+ * Please use {@link #setCost(double)}.
+ */
+ @Deprecated
+ protected double cost;
+ /** Objective function derivatives. */
+ private MultivariateDifferentiableVectorFunction jF;
+ /** Number of evaluations of the Jacobian. */
+ private int jacobianEvaluations;
+ /** Square-root of the weight matrix. */
+ private RealMatrix weightMatrixSqrt;
+
+ /**
+ * Simple constructor with default settings.
+ * The convergence check is set to a {@link
+ * org.apache.commons.math3.optimization.SimpleVectorValueChecker}.
+ * @deprecated See {@link org.apache.commons.math3.optimization.SimpleVectorValueChecker#SimpleVectorValueChecker()}
+ */
+ @Deprecated
+ protected AbstractLeastSquaresOptimizer() {}
+
+ /**
+ * @param checker Convergence checker.
+ */
+ protected AbstractLeastSquaresOptimizer(ConvergenceChecker<PointVectorValuePair> checker) {
+ super(checker);
+ }
+
+ /**
+ * @return the number of evaluations of the Jacobian function.
+ */
+ public int getJacobianEvaluations() {
+ return jacobianEvaluations;
+ }
+
+ /**
+ * Update the jacobian matrix.
+ *
+ * @throws DimensionMismatchException if the Jacobian dimension does not
+ * match problem dimension.
+ * @deprecated As of 3.1. Please use {@link #computeWeightedJacobian(double[])}
+ * instead.
+ */
+ @Deprecated
+ protected void updateJacobian() {
+ final RealMatrix weightedJacobian = computeWeightedJacobian(point);
+ weightedResidualJacobian = weightedJacobian.scalarMultiply(-1).getData();
+ }
+
+ /**
+ * Computes the Jacobian matrix.
+ *
+ * @param params Model parameters at which to compute the Jacobian.
+ * @return the weighted Jacobian: W<sup>1/2</sup> J.
+ * @throws DimensionMismatchException if the Jacobian dimension does not
+ * match problem dimension.
+ * @since 3.1
+ */
+ protected RealMatrix computeWeightedJacobian(double[] params) {
+ ++jacobianEvaluations;
+
+ final DerivativeStructure[] dsPoint = new DerivativeStructure[params.length];
+ final int nC = params.length;
+ for (int i = 0; i < nC; ++i) {
+ dsPoint[i] = new DerivativeStructure(nC, 1, i, params[i]);
+ }
+ final DerivativeStructure[] dsValue = jF.value(dsPoint);
+ final int nR = getTarget().length;
+ if (dsValue.length != nR) {
+ throw new DimensionMismatchException(dsValue.length, nR);
+ }
+ final double[][] jacobianData = new double[nR][nC];
+ for (int i = 0; i < nR; ++i) {
+ int[] orders = new int[nC];
+ for (int j = 0; j < nC; ++j) {
+ orders[j] = 1;
+ jacobianData[i][j] = dsValue[i].getPartialDerivative(orders);
+ orders[j] = 0;
+ }
+ }
+
+ return weightMatrixSqrt.multiply(MatrixUtils.createRealMatrix(jacobianData));
+ }
+
+ /**
+ * Update the residuals array and cost function value.
+ * @throws DimensionMismatchException if the dimension does not match the
+ * problem dimension.
+ * @throws org.apache.commons.math3.exception.TooManyEvaluationsException
+ * if the maximal number of evaluations is exceeded.
+ * @deprecated As of 3.1. Please use {@link #computeResiduals(double[])},
+ * {@link #computeObjectiveValue(double[])}, {@link #computeCost(double[])}
+ * and {@link #setCost(double)} instead.
+ */
+ @Deprecated
+ protected void updateResidualsAndCost() {
+ objective = computeObjectiveValue(point);
+ final double[] res = computeResiduals(objective);
+
+ // Compute cost.
+ cost = computeCost(res);
+
+ // Compute weighted residuals.
+ final ArrayRealVector residuals = new ArrayRealVector(res);
+ weightedResiduals = weightMatrixSqrt.operate(residuals).toArray();
+ }
+
+ /**
+ * Computes the cost.
+ *
+ * @param residuals Residuals.
+ * @return the cost.
+ * @see #computeResiduals(double[])
+ * @since 3.1
+ */
+ protected double computeCost(double[] residuals) {
+ final ArrayRealVector r = new ArrayRealVector(residuals);
+ return FastMath.sqrt(r.dotProduct(getWeight().operate(r)));
+ }
+
+ /**
+ * Get the Root Mean Square value, i.e. the root of the arithmetic
+ * mean of the square of all weighted residuals. This is related to the
+ * criterion that is minimized by the optimizer as follows: if
+ * <em>c</em> is the criterion, and <em>n</em> is the number of
+ * measurements, then the RMS is <em>sqrt(c/n)</em>.
+ *
+ * @return RMS value
+ */
+ public double getRMS() {
+ return FastMath.sqrt(getChiSquare() / rows);
+ }
+
+ /**
+ * Get a Chi-Square-like value assuming the N residuals follow N
+ * distinct normal distributions centered on 0 and whose variances are
+ * the reciprocal of the weights.
+ * @return chi-square value
+ */
+ public double getChiSquare() {
+ return cost * cost;
+ }
+
+ /**
+ * Gets the square-root of the weight matrix.
+ *
+ * @return the square-root of the weight matrix.
+ * @since 3.1
+ */
+ public RealMatrix getWeightSquareRoot() {
+ return weightMatrixSqrt.copy();
+ }
+
+ /**
+ * Sets the cost.
+ *
+ * @param cost Cost value.
+ * @since 3.1
+ */
+ protected void setCost(double cost) {
+ this.cost = cost;
+ }
+
+ /**
+ * Get the covariance matrix of the optimized parameters.
+ *
+ * @return the covariance matrix.
+ * @throws org.apache.commons.math3.linear.SingularMatrixException
+ * if the covariance matrix cannot be computed (singular problem).
+ * @see #getCovariances(double)
+ * @deprecated As of 3.1. Please use {@link #computeCovariances(double[],double)}
+ * instead.
+ */
+ @Deprecated
+ public double[][] getCovariances() {
+ return getCovariances(DEFAULT_SINGULARITY_THRESHOLD);
+ }
+
+ /**
+ * Get the covariance matrix of the optimized parameters.
+ * <br/>
+ * Note that this operation involves the inversion of the
+ * <code>J<sup>T</sup>J</code> matrix, where {@code J} is the
+ * Jacobian matrix.
+ * The {@code threshold} parameter is a way for the caller to specify
+ * that the result of this computation should be considered meaningless,
+ * and thus trigger an exception.
+ *
+ * @param threshold Singularity threshold.
+ * @return the covariance matrix.
+ * @throws org.apache.commons.math3.linear.SingularMatrixException
+ * if the covariance matrix cannot be computed (singular problem).
+ * @deprecated As of 3.1. Please use {@link #computeCovariances(double[],double)}
+ * instead.
+ */
+ @Deprecated
+ public double[][] getCovariances(double threshold) {
+ return computeCovariances(point, threshold);
+ }
+
+ /**
+ * Get the covariance matrix of the optimized parameters.
+ * <br/>
+ * Note that this operation involves the inversion of the
+ * <code>J<sup>T</sup>J</code> matrix, where {@code J} is the
+ * Jacobian matrix.
+ * The {@code threshold} parameter is a way for the caller to specify
+ * that the result of this computation should be considered meaningless,
+ * and thus trigger an exception.
+ *
+ * @param params Model parameters.
+ * @param threshold Singularity threshold.
+ * @return the covariance matrix.
+ * @throws org.apache.commons.math3.linear.SingularMatrixException
+ * if the covariance matrix cannot be computed (singular problem).
+ * @since 3.1
+ */
+ public double[][] computeCovariances(double[] params,
+ double threshold) {
+ // Set up the Jacobian.
+ final RealMatrix j = computeWeightedJacobian(params);
+
+ // Compute transpose(J)J.
+ final RealMatrix jTj = j.transpose().multiply(j);
+
+ // Compute the covariances matrix.
+ final DecompositionSolver solver
+ = new QRDecomposition(jTj, threshold).getSolver();
+ return solver.getInverse().getData();
+ }
+
+ /**
+ * <p>
+ * Returns an estimate of the standard deviation of each parameter. The
+ * returned values are the so-called (asymptotic) standard errors on the
+ * parameters, defined as {@code sd(a[i]) = sqrt(S / (n - m) * C[i][i])},
+ * where {@code a[i]} is the optimized value of the {@code i}-th parameter,
+ * {@code S} is the minimized value of the sum of squares objective function
+ * (as returned by {@link #getChiSquare()}), {@code n} is the number of
+ * observations, {@code m} is the number of parameters and {@code C} is the
+ * covariance matrix.
+ * </p>
+ * <p>
+ * See also
+ * <a href="http://en.wikipedia.org/wiki/Least_squares">Wikipedia</a>,
+ * or
+ * <a href="http://mathworld.wolfram.com/LeastSquaresFitting.html">MathWorld</a>,
+ * equations (34) and (35) for a particular case.
+ * </p>
+ *
+ * @return an estimate of the standard deviation of the optimized parameters
+ * @throws org.apache.commons.math3.linear.SingularMatrixException
+ * if the covariance matrix cannot be computed.
+ * @throws NumberIsTooSmallException if the number of degrees of freedom is not
+ * positive, i.e. the number of measurements is less than or equal to the number of
+ * parameters.
+ * @deprecated as of version 3.1, {@link #computeSigma(double[],double)} should be used
+ * instead. It should be emphasized that {@code guessParametersErrors} and
+ * {@code computeSigma} are <em>not</em> strictly equivalent.
+ */
+ @Deprecated
+ public double[] guessParametersErrors() {
+ if (rows <= cols) {
+ throw new NumberIsTooSmallException(LocalizedFormats.NO_DEGREES_OF_FREEDOM,
+ rows, cols, false);
+ }
+ double[] errors = new double[cols];
+ final double c = FastMath.sqrt(getChiSquare() / (rows - cols));
+ double[][] covar = computeCovariances(point, 1e-14);
+ for (int i = 0; i < errors.length; ++i) {
+ errors[i] = FastMath.sqrt(covar[i][i]) * c;
+ }
+ return errors;
+ }
+
+ /**
+ * Computes an estimate of the standard deviation of the parameters. The
+ * returned values are the square root of the diagonal coefficients of the
+ * covariance matrix, {@code sd(a[i]) ~= sqrt(C[i][i])}, where {@code a[i]}
+ * is the optimized value of the {@code i}-th parameter, and {@code C} is
+ * the covariance matrix.
+ *
+ * @param params Model parameters.
+ * @param covarianceSingularityThreshold Singularity threshold (see
+ * {@link #computeCovariances(double[],double) computeCovariances}).
+ * @return an estimate of the standard deviation of the optimized parameters
+ * @throws org.apache.commons.math3.linear.SingularMatrixException
+ * if the covariance matrix cannot be computed.
+ * @since 3.1
+ */
+ public double[] computeSigma(double[] params,
+ double covarianceSingularityThreshold) {
+ final int nC = params.length;
+ final double[] sig = new double[nC];
+ final double[][] cov = computeCovariances(params, covarianceSingularityThreshold);
+ for (int i = 0; i < nC; ++i) {
+ sig[i] = FastMath.sqrt(cov[i][i]);
+ }
+ return sig;
+ }
+
+ /** {@inheritDoc}
+ * @deprecated As of 3.1. Please use
+ * {@link BaseAbstractMultivariateVectorOptimizer#optimize(int,
+ * org.apache.commons.math3.analysis.MultivariateVectorFunction,OptimizationData[])
+ * optimize(int,MultivariateDifferentiableVectorFunction,OptimizationData...)}
+ * instead.
+ */
+ @Override
+ @Deprecated
+ public PointVectorValuePair optimize(int maxEval,
+ final DifferentiableMultivariateVectorFunction f,
+ final double[] target, final double[] weights,
+ final double[] startPoint) {
+ return optimizeInternal(maxEval,
+ FunctionUtils.toMultivariateDifferentiableVectorFunction(f),
+ new Target(target),
+ new Weight(weights),
+ new InitialGuess(startPoint));
+ }
+
+ /**
+ * Optimize an objective function.
+ * Optimization is considered to be a weighted least-squares minimization.
+ * The cost function to be minimized is
+ * <code>&sum;weight<sub>i</sub>(objective<sub>i</sub> - target<sub>i</sub>)<sup>2</sup></code>
+ *
+ * @param f Objective function.
+ * @param target Target value for the objective functions at optimum.
+ * @param weights Weights for the least squares cost computation.
+ * @param startPoint Start point for optimization.
+ * @return the point/value pair giving the optimal value for objective
+ * function.
+ * @param maxEval Maximum number of function evaluations.
+ * @throws org.apache.commons.math3.exception.DimensionMismatchException
+ * if the start point dimension is wrong.
+ * @throws org.apache.commons.math3.exception.TooManyEvaluationsException
+ * if the maximal number of evaluations is exceeded.
+ * @throws org.apache.commons.math3.exception.NullArgumentException if
+ * any argument is {@code null}.
+ * @deprecated As of 3.1. Please use
+ * {@link BaseAbstractMultivariateVectorOptimizer#optimize(int,
+ * org.apache.commons.math3.analysis.MultivariateVectorFunction,OptimizationData[])
+ * optimize(int,MultivariateDifferentiableVectorFunction,OptimizationData...)}
+ * instead.
+ */
+ @Deprecated
+ public PointVectorValuePair optimize(final int maxEval,
+ final MultivariateDifferentiableVectorFunction f,
+ final double[] target, final double[] weights,
+ final double[] startPoint) {
+ return optimizeInternal(maxEval, f,
+ new Target(target),
+ new Weight(weights),
+ new InitialGuess(startPoint));
+ }
+
+ /**
+ * Optimize an objective function.
+ * Optimization is considered to be a weighted least-squares minimization.
+ * The cost function to be minimized is
+ * <code>&sum;weight<sub>i</sub>(objective<sub>i</sub> - target<sub>i</sub>)<sup>2</sup></code>
+ *
+ * @param maxEval Allowed number of evaluations of the objective function.
+ * @param f Objective function.
+ * @param optData Optimization data. The following data will be looked for:
+ * <ul>
+ * <li>{@link Target}</li>
+ * <li>{@link Weight}</li>
+ * <li>{@link InitialGuess}</li>
+ * </ul>
+ * @return the point/value pair giving the optimal value of the objective
+ * function.
+ * @throws org.apache.commons.math3.exception.TooManyEvaluationsException if
+ * the maximal number of evaluations is exceeded.
+ * @throws DimensionMismatchException if the target, and weight arguments
+ * have inconsistent dimensions.
+ * @see BaseAbstractMultivariateVectorOptimizer#optimizeInternal(int,
+ * org.apache.commons.math3.analysis.MultivariateVectorFunction,OptimizationData[])
+ * @since 3.1
+ * @deprecated As of 3.1. Override is necessary only until this class's generic
+ * argument is changed to {@code MultivariateDifferentiableVectorFunction}.
+ */
+ @Deprecated
+ protected PointVectorValuePair optimizeInternal(final int maxEval,
+ final MultivariateDifferentiableVectorFunction f,
+ OptimizationData... optData) {
+ // XXX Conversion will be removed when the generic argument of the
+ // base class becomes "MultivariateDifferentiableVectorFunction".
+ return super.optimizeInternal(maxEval, FunctionUtils.toDifferentiableMultivariateVectorFunction(f), optData);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ protected void setUp() {
+ super.setUp();
+
+ // Reset counter.
+ jacobianEvaluations = 0;
+
+ // Square-root of the weight matrix.
+ weightMatrixSqrt = squareRoot(getWeight());
+
+ // Store least squares problem characteristics.
+ // XXX The conversion won't be necessary when the generic argument of
+ // the base class becomes "MultivariateDifferentiableVectorFunction".
+ // XXX "jF" is not strictly necessary anymore but is currently more
+ // efficient than converting the value returned from "getObjectiveFunction()"
+ // every time it is used.
+ jF = FunctionUtils.toMultivariateDifferentiableVectorFunction((DifferentiableMultivariateVectorFunction) getObjectiveFunction());
+
+ // Arrays shared with "private" and "protected" methods.
+ point = getStartPoint();
+ rows = getTarget().length;
+ cols = point.length;
+ }
+
+ /**
+ * Computes the residuals.
+ * The residual is the difference between the observed (target)
+ * values and the model (objective function) value.
+ * There is one residual for each element of the vector-valued
+ * function.
+ *
+ * @param objectiveValue Value of the objective function. This is
+ * the value returned from a call to
+ * {@link #computeObjectiveValue(double[]) computeObjectiveValue}
+ * (whose array argument contains the model parameters).
+ * @return the residuals.
+ * @throws DimensionMismatchException if {@code objectiveValue} has a wrong
+ * length.
+ * @since 3.1
+ */
+ protected double[] computeResiduals(double[] objectiveValue) {
+ final double[] target = getTarget();
+ if (objectiveValue.length != target.length) {
+ throw new DimensionMismatchException(target.length,
+ objectiveValue.length);
+ }
+
+ final double[] residuals = new double[target.length];
+ for (int i = 0; i < target.length; i++) {
+ residuals[i] = target[i] - objectiveValue[i];
+ }
+
+ return residuals;
+ }
+
+ /**
+ * Computes the square-root of the weight matrix.
+ *
+ * @param m Symmetric, positive-definite (weight) matrix.
+ * @return the square-root of the weight matrix.
+ */
+ private RealMatrix squareRoot(RealMatrix m) {
+ if (m instanceof DiagonalMatrix) {
+ final int dim = m.getRowDimension();
+ final RealMatrix sqrtM = new DiagonalMatrix(dim);
+ for (int i = 0; i < dim; i++) {
+ sqrtM.setEntry(i, i, FastMath.sqrt(m.getEntry(i, i)));
+ }
+ return sqrtM;
+ } else {
+ final EigenDecomposition dec = new EigenDecomposition(m);
+ return dec.getSquareRoot();
+ }
+ }
+}
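
A hedged usage sketch for the 3.1 API defined above: fitting the line y = a*x + b to three points through the LevenbergMarquardtOptimizer subclass added later in this changeset, then querying computeSigma. The model, data and expected result are illustrative assumptions, not taken from this commit.

import java.util.Arrays;

import org.apache.commons.math3.analysis.differentiation.DerivativeStructure;
import org.apache.commons.math3.analysis.differentiation.MultivariateDifferentiableVectorFunction;
import org.apache.commons.math3.optimization.PointVectorValuePair;
import org.apache.commons.math3.optimization.general.LevenbergMarquardtOptimizer;

public class LeastSquaresSketch {
    public static void main(String[] args) {
        final double[] x = { 0, 1, 2 };
        // Model: y_i = a * x_i + b, with parameters p = {a, b}.
        MultivariateDifferentiableVectorFunction model =
            new MultivariateDifferentiableVectorFunction() {
                public double[] value(double[] p) {
                    double[] y = new double[x.length];
                    for (int i = 0; i < x.length; i++) {
                        y[i] = p[0] * x[i] + p[1];
                    }
                    return y;
                }
                public DerivativeStructure[] value(DerivativeStructure[] p) {
                    DerivativeStructure[] y = new DerivativeStructure[x.length];
                    for (int i = 0; i < x.length; i++) {
                        y[i] = p[0].multiply(x[i]).add(p[1]);
                    }
                    return y;
                }
            };
        LevenbergMarquardtOptimizer optimizer = new LevenbergMarquardtOptimizer();
        // Jacobian rows = target dimension (3), columns = initial guess dimension (2).
        PointVectorValuePair fit = optimizer.optimize(100, model,
                                                      new double[] { 1, 3, 5 }, // target
                                                      new double[] { 1, 1, 1 }, // weights
                                                      new double[] { 0, 0 });   // start
        double[] sigma = optimizer.computeSigma(fit.getPoint(), 1e-14);
        // Expected fit for this synthetic data: a ~= 2, b ~= 1.
        System.out.println(Arrays.toString(fit.getPoint()) + " " + Arrays.toString(sigma));
    }
}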
diff --git a/src/main/java/org/apache/commons/math3/optimization/general/AbstractScalarDifferentiableOptimizer.java b/src/main/java/org/apache/commons/math3/optimization/general/AbstractScalarDifferentiableOptimizer.java
new file mode 100644
index 0000000..3947c2c
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optimization/general/AbstractScalarDifferentiableOptimizer.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optimization.general;
+
+import org.apache.commons.math3.analysis.DifferentiableMultivariateFunction;
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.analysis.FunctionUtils;
+import org.apache.commons.math3.analysis.differentiation.MultivariateDifferentiableFunction;
+import org.apache.commons.math3.optimization.DifferentiableMultivariateOptimizer;
+import org.apache.commons.math3.optimization.GoalType;
+import org.apache.commons.math3.optimization.ConvergenceChecker;
+import org.apache.commons.math3.optimization.PointValuePair;
+import org.apache.commons.math3.optimization.direct.BaseAbstractMultivariateOptimizer;
+
+/**
+ * Base class for implementing optimizers for multivariate scalar
+ * differentiable functions.
+ * It contains boiler-plate code for dealing with gradient evaluation.
+ *
+ * @deprecated As of 3.1 (to be removed in 4.0).
+ * @since 2.0
+ */
+@Deprecated
+public abstract class AbstractScalarDifferentiableOptimizer
+ extends BaseAbstractMultivariateOptimizer<DifferentiableMultivariateFunction>
+ implements DifferentiableMultivariateOptimizer {
+ /**
+ * Objective function gradient.
+ */
+ private MultivariateVectorFunction gradient;
+
+ /**
+ * Simple constructor with default settings.
+ * The convergence check is set to a
+ * {@link org.apache.commons.math3.optimization.SimpleValueChecker
+ * SimpleValueChecker}.
+ * @deprecated See {@link org.apache.commons.math3.optimization.SimpleValueChecker#SimpleValueChecker()}
+ */
+ @Deprecated
+ protected AbstractScalarDifferentiableOptimizer() {}
+
+ /**
+ * @param checker Convergence checker.
+ */
+ protected AbstractScalarDifferentiableOptimizer(ConvergenceChecker<PointValuePair> checker) {
+ super(checker);
+ }
+
+ /**
+ * Compute the gradient vector.
+ *
+ * @param evaluationPoint Point at which the gradient must be evaluated.
+ * @return the gradient at the specified point.
+ * @throws org.apache.commons.math3.exception.TooManyEvaluationsException
+ * if the allowed number of evaluations is exceeded.
+ */
+ protected double[] computeObjectiveGradient(final double[] evaluationPoint) {
+ return gradient.value(evaluationPoint);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ protected PointValuePair optimizeInternal(int maxEval,
+ final DifferentiableMultivariateFunction f,
+ final GoalType goalType,
+ final double[] startPoint) {
+ // Store optimization problem characteristics.
+ gradient = f.gradient();
+
+ return super.optimizeInternal(maxEval, f, goalType, startPoint);
+ }
+
+ /**
+ * Optimize an objective function.
+ *
+ * @param f Objective function.
+ * @param goalType Type of optimization goal: either
+ * {@link GoalType#MAXIMIZE} or {@link GoalType#MINIMIZE}.
+ * @param startPoint Start point for optimization.
+ * @param maxEval Maximum number of function evaluations.
+ * @return the point/value pair giving the optimal value for objective
+ * function.
+ * @throws org.apache.commons.math3.exception.DimensionMismatchException
+ * if the start point dimension is wrong.
+ * @throws org.apache.commons.math3.exception.TooManyEvaluationsException
+ * if the maximal number of evaluations is exceeded.
+ * @throws org.apache.commons.math3.exception.NullArgumentException if
+ * any argument is {@code null}.
+ */
+ public PointValuePair optimize(final int maxEval,
+ final MultivariateDifferentiableFunction f,
+ final GoalType goalType,
+ final double[] startPoint) {
+ return optimizeInternal(maxEval,
+ FunctionUtils.toDifferentiableMultivariateFunction(f),
+ goalType,
+ startPoint);
+ }
+}
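
A usage sketch for the optimize entry point just above, assuming the NonLinearConjugateGradientOptimizer listed in this changeset's diffstat as the concrete subclass; the objective and start point are illustrative.

import org.apache.commons.math3.analysis.differentiation.DerivativeStructure;
import org.apache.commons.math3.analysis.differentiation.MultivariateDifferentiableFunction;
import org.apache.commons.math3.optimization.GoalType;
import org.apache.commons.math3.optimization.PointValuePair;
import org.apache.commons.math3.optimization.general.ConjugateGradientFormula;
import org.apache.commons.math3.optimization.general.NonLinearConjugateGradientOptimizer;

public class ScalarOptimizeSketch {
    public static void main(String[] args) {
        // Illustrative objective: f(x, y) = (x - 1)^2 + (y - 2)^2, minimum at (1, 2).
        MultivariateDifferentiableFunction f = new MultivariateDifferentiableFunction() {
            public double value(double[] x) {
                return (x[0] - 1) * (x[0] - 1) + (x[1] - 2) * (x[1] - 2);
            }
            public DerivativeStructure value(DerivativeStructure[] x) {
                return x[0].subtract(1).pow(2).add(x[1].subtract(2).pow(2));
            }
        };
        NonLinearConjugateGradientOptimizer optimizer =
            new NonLinearConjugateGradientOptimizer(ConjugateGradientFormula.POLAK_RIBIERE);
        PointValuePair optimum = optimizer.optimize(200, f, GoalType.MINIMIZE,
                                                    new double[] { 0, 0 });
        System.out.println(optimum.getPoint()[0] + ", " + optimum.getPoint()[1]); // ~ 1.0, 2.0
    }
}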
diff --git a/src/main/java/org/apache/commons/math3/optimization/general/ConjugateGradientFormula.java b/src/main/java/org/apache/commons/math3/optimization/general/ConjugateGradientFormula.java
new file mode 100644
index 0000000..5fee40a
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optimization/general/ConjugateGradientFormula.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optimization.general;
+
+/**
+ * Available choices of update formulas for the &beta; parameter
+ * in {@link NonLinearConjugateGradientOptimizer}.
+ * <p>
+ * The &beta; parameter is used to compute the successive conjugate
+ * search directions. For non-linear conjugate gradients, there are
+ * two formulas to compute &beta;:
+ * <ul>
+ * <li>Fletcher-Reeves formula</li>
+ * <li>Polak-Ribi&egrave;re formula</li>
+ * </ul>
+ * On the one hand, the Fletcher-Reeves formula is guaranteed to converge
+ * if the start point is close enough to the optimum, whereas the
+ * Polak-Ribi&egrave;re formula may not converge in rare cases. On the
+ * other hand, the Polak-Ribi&egrave;re formula is often faster when it
+ * does converge, which is why it is often preferred in practice.
+ * <p>
+ * @see NonLinearConjugateGradientOptimizer
+ * @deprecated As of 3.1 (to be removed in 4.0).
+ * @since 2.0
+ */
+@Deprecated
+public enum ConjugateGradientFormula {
+
+ /** Fletcher-Reeves formula. */
+ FLETCHER_REEVES,
+
+ /** Polak-Ribi&egrave;re formula. */
+ POLAK_RIBIERE
+
+}
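
For reference, the two updates named by this enum take the standard textbook forms below, where g_k denotes the gradient at iteration k (stated here for convenience; the formulas themselves do not appear in this changeset):

\beta_k^{\mathrm{FR}} = \frac{g_{k+1}^{\mathsf T}\, g_{k+1}}{g_k^{\mathsf T}\, g_k},
\qquad
\beta_k^{\mathrm{PR}} = \frac{g_{k+1}^{\mathsf T}\,\left(g_{k+1} - g_k\right)}{g_k^{\mathsf T}\, g_k}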
diff --git a/src/main/java/org/apache/commons/math3/optimization/general/GaussNewtonOptimizer.java b/src/main/java/org/apache/commons/math3/optimization/general/GaussNewtonOptimizer.java
new file mode 100644
index 0000000..464a0f0
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optimization/general/GaussNewtonOptimizer.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optimization.general;
+
+import org.apache.commons.math3.exception.ConvergenceException;
+import org.apache.commons.math3.exception.NullArgumentException;
+import org.apache.commons.math3.exception.MathInternalError;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+import org.apache.commons.math3.linear.ArrayRealVector;
+import org.apache.commons.math3.linear.BlockRealMatrix;
+import org.apache.commons.math3.linear.DecompositionSolver;
+import org.apache.commons.math3.linear.LUDecomposition;
+import org.apache.commons.math3.linear.QRDecomposition;
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.linear.SingularMatrixException;
+import org.apache.commons.math3.optimization.ConvergenceChecker;
+import org.apache.commons.math3.optimization.SimpleVectorValueChecker;
+import org.apache.commons.math3.optimization.PointVectorValuePair;
+
+/**
+ * Gauss-Newton least-squares solver.
+ * <p>
+ * This class solves a least-squares problem by solving the normal equations
+ * of the linearized problem at each iteration. Either LU decomposition or
+ * QR decomposition can be used to solve the normal equations. LU decomposition
+ * is faster but QR decomposition is more robust for difficult problems.
+ * </p>
+ *
+ * @deprecated As of 3.1 (to be removed in 4.0).
+ * @since 2.0
+ *
+ */
+@Deprecated
+public class GaussNewtonOptimizer extends AbstractLeastSquaresOptimizer {
+ /** Indicator for using LU decomposition. */
+ private final boolean useLU;
+
+ /**
+ * Simple constructor with default settings.
+ * The normal equations will be solved using LU decomposition and the
+ * convergence check is set to a {@link SimpleVectorValueChecker}
+ * with default tolerances.
+ * @deprecated See {@link SimpleVectorValueChecker#SimpleVectorValueChecker()}
+ */
+ @Deprecated
+ public GaussNewtonOptimizer() {
+ this(true);
+ }
+
+ /**
+ * Simple constructor with default settings.
+ * The normal equations will be solved using LU decomposition.
+ *
+ * @param checker Convergence checker.
+ */
+ public GaussNewtonOptimizer(ConvergenceChecker<PointVectorValuePair> checker) {
+ this(true, checker);
+ }
+
+ /**
+ * Simple constructor with default settings.
+ * The convergence check is set to a {@link SimpleVectorValueChecker}
+ * with default tolerances.
+ *
+ * @param useLU If {@code true}, the normal equations will be solved
+ * using LU decomposition, otherwise they will be solved using QR
+ * decomposition.
+ * @deprecated See {@link SimpleVectorValueChecker#SimpleVectorValueChecker()}
+ */
+ @Deprecated
+ public GaussNewtonOptimizer(final boolean useLU) {
+ this(useLU, new SimpleVectorValueChecker());
+ }
+
+ /**
+ * @param useLU If {@code true}, the normal equations will be solved
+ * using LU decomposition, otherwise they will be solved using QR
+ * decomposition.
+ * @param checker Convergence checker.
+ */
+ public GaussNewtonOptimizer(final boolean useLU,
+ ConvergenceChecker<PointVectorValuePair> checker) {
+ super(checker);
+ this.useLU = useLU;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public PointVectorValuePair doOptimize() {
+ final ConvergenceChecker<PointVectorValuePair> checker
+ = getConvergenceChecker();
+
+ // Computation will be useless without a checker (see "for-loop").
+ if (checker == null) {
+ throw new NullArgumentException();
+ }
+
+ final double[] targetValues = getTarget();
+ final int nR = targetValues.length; // Number of observed data.
+
+ final RealMatrix weightMatrix = getWeight();
+ // Diagonal of the weight matrix.
+ final double[] residualsWeights = new double[nR];
+ for (int i = 0; i < nR; i++) {
+ residualsWeights[i] = weightMatrix.getEntry(i, i);
+ }
+
+ final double[] currentPoint = getStartPoint();
+ final int nC = currentPoint.length;
+
+ // iterate until convergence is reached
+ PointVectorValuePair current = null;
+ int iter = 0;
+ for (boolean converged = false; !converged;) {
+ ++iter;
+
+ // evaluate the objective function and its jacobian
+ PointVectorValuePair previous = current;
+ // Value of the objective function at "currentPoint".
+ final double[] currentObjective = computeObjectiveValue(currentPoint);
+ final double[] currentResiduals = computeResiduals(currentObjective);
+ final RealMatrix weightedJacobian = computeWeightedJacobian(currentPoint);
+ current = new PointVectorValuePair(currentPoint, currentObjective);
+
+ // build the linear problem
+ final double[] b = new double[nC];
+ final double[][] a = new double[nC][nC];
+ for (int i = 0; i < nR; ++i) {
+
+ final double[] grad = weightedJacobian.getRow(i);
+ final double weight = residualsWeights[i];
+ final double residual = currentResiduals[i];
+
+ // compute the normal equation
+ final double wr = weight * residual;
+ for (int j = 0; j < nC; ++j) {
+ b[j] += wr * grad[j];
+ }
+
+ // build the contribution matrix for measurement i
+ for (int k = 0; k < nC; ++k) {
+ double[] ak = a[k];
+ double wgk = weight * grad[k];
+ for (int l = 0; l < nC; ++l) {
+ ak[l] += wgk * grad[l];
+ }
+ }
+ }
+
+ try {
+ // solve the linearized least squares problem
+ RealMatrix mA = new BlockRealMatrix(a);
+ DecompositionSolver solver = useLU ?
+ new LUDecomposition(mA).getSolver() :
+ new QRDecomposition(mA).getSolver();
+ final double[] dX = solver.solve(new ArrayRealVector(b, false)).toArray();
+ // update the estimated parameters
+ for (int i = 0; i < nC; ++i) {
+ currentPoint[i] += dX[i];
+ }
+ } catch (SingularMatrixException e) {
+ throw new ConvergenceException(LocalizedFormats.UNABLE_TO_SOLVE_SINGULAR_PROBLEM);
+ }
+
+ // Check convergence.
+ if (previous != null) {
+ converged = checker.converged(iter, previous, current);
+ if (converged) {
+ cost = computeCost(currentResiduals);
+ // Update (deprecated) "point" field.
+ point = current.getPoint();
+ return current;
+ }
+ }
+ }
+ // Must never happen.
+ throw new MathInternalError();
+ }
+}
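
A hedged sketch of using GaussNewtonOptimizer directly, solving the normal equations by QR (useLU = false); note that doOptimize() above requires a non-null convergence checker. The line model and data are the same illustrative assumptions used earlier, not part of this commit.

import org.apache.commons.math3.analysis.differentiation.DerivativeStructure;
import org.apache.commons.math3.analysis.differentiation.MultivariateDifferentiableVectorFunction;
import org.apache.commons.math3.optimization.PointVectorValuePair;
import org.apache.commons.math3.optimization.SimpleVectorValueChecker;
import org.apache.commons.math3.optimization.general.GaussNewtonOptimizer;

public class GaussNewtonSketch {
    public static void main(String[] args) {
        final double[] x = { 0, 1, 2 };
        // Model: y_i = a * x_i + b, parameters p = {a, b}.
        MultivariateDifferentiableVectorFunction line =
            new MultivariateDifferentiableVectorFunction() {
                public double[] value(double[] p) {
                    double[] y = new double[x.length];
                    for (int i = 0; i < x.length; i++) {
                        y[i] = p[0] * x[i] + p[1];
                    }
                    return y;
                }
                public DerivativeStructure[] value(DerivativeStructure[] p) {
                    DerivativeStructure[] y = new DerivativeStructure[x.length];
                    for (int i = 0; i < x.length; i++) {
                        y[i] = p[0].multiply(x[i]).add(p[1]);
                    }
                    return y;
                }
            };
        // QR decomposition (useLU = false); a checker is mandatory for this optimizer.
        GaussNewtonOptimizer optimizer =
            new GaussNewtonOptimizer(false, new SimpleVectorValueChecker(1e-10, 1e-10));
        PointVectorValuePair fit = optimizer.optimize(100, line,
                                                      new double[] { 1, 3, 5 },
                                                      new double[] { 1, 1, 1 },
                                                      new double[] { 0, 0 });
        System.out.println(fit.getPoint()[0] + ", " + fit.getPoint()[1]); // ~ 2.0, 1.0
    }
}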
diff --git a/src/main/java/org/apache/commons/math3/optimization/general/LevenbergMarquardtOptimizer.java b/src/main/java/org/apache/commons/math3/optimization/general/LevenbergMarquardtOptimizer.java
new file mode 100644
index 0000000..a29cafc
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optimization/general/LevenbergMarquardtOptimizer.java
@@ -0,0 +1,943 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.optimization.general;
+
+import java.util.Arrays;
+
+import org.apache.commons.math3.exception.ConvergenceException;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+import org.apache.commons.math3.optimization.PointVectorValuePair;
+import org.apache.commons.math3.optimization.ConvergenceChecker;
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.util.Precision;
+import org.apache.commons.math3.util.FastMath;
+
+
+/**
+ * This class solves a least squares problem using the Levenberg-Marquardt algorithm.
+ *
+ * <p>This implementation <em>should</em> work even for over-determined systems
+ * (i.e. systems having more points than unknown parameters). Over-determined systems
+ * are solved by ignoring the points which have the smallest impact according
+ * to their jacobian column norm. Only the rank of the matrix and some loop bounds
+ * are changed to implement this.</p>
+ *
+ * <p>The resolution engine is a simple translation of the MINPACK <a
+ * href="http://www.netlib.org/minpack/lmder.f">lmder</a> routine with minor
+ * changes. The changes include the over-determined resolution, the use of
+ * inherited convergence checker and the Q.R. decomposition which has been
+ * rewritten following the algorithm described in the
+ * P. Lascaux and R. Theodor book <i>Analyse num&eacute;rique matricielle
+ * appliqu&eacute;e &agrave; l'art de l'ing&eacute;nieur</i>, Masson 1986.</p>
+ * <p>The authors of the original fortran version are:
+ * <ul>
+ * <li>Argonne National Laboratory. MINPACK project. March 1980</li>
+ * <li>Burton S. Garbow</li>
+ * <li>Kenneth E. Hillstrom</li>
+ * <li>Jorge J. More</li>
+ * </ul>
+ * The redistribution policy for MINPACK is available <a
+ * href="http://www.netlib.org/minpack/disclaimer">here</a>, for convenience, it
+ * is reproduced below.</p>
+ *
+ * <table border="0" width="80%" cellpadding="10" align="center" bgcolor="#E0E0E0">
+ * <tr><td>
+ * Minpack Copyright Notice (1999) University of Chicago.
+ * All rights reserved
+ * </td></tr>
+ * <tr><td>
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * <ol>
+ * <li>Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.</li>
+ * <li>Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.</li>
+ * <li>The end-user documentation included with the redistribution, if any,
+ * must include the following acknowledgment:
+ * <code>This product includes software developed by the University of
+ * Chicago, as Operator of Argonne National Laboratory.</code>
+ * Alternately, this acknowledgment may appear in the software itself,
+ * if and wherever such third-party acknowledgments normally appear.</li>
+ * <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
+ * UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
+ * THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
+ * OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
+ * OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
+ * USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
+ * THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
+ * DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
+ * UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
+ * BE CORRECTED.</strong></li>
+ * <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
+ * HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
+ * ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
+ * INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
+ * ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
+ * PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
+ * SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
+ * (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
+ * EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
+ * POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
+ * </ol></td></tr>
+ * </table>
+ * @deprecated As of 3.1 (to be removed in 4.0).
+ * @since 2.0
+ *
+ */
+@Deprecated
+public class LevenbergMarquardtOptimizer extends AbstractLeastSquaresOptimizer {
+ /** Number of solved columns. */
+ private int solvedCols;
+ /** Diagonal elements of the R matrix in the Q.R. decomposition. */
+ private double[] diagR;
+ /** Norms of the columns of the jacobian matrix. */
+ private double[] jacNorm;
+ /** Coefficients of the Householder transforms vectors. */
+ private double[] beta;
+ /** Columns permutation array. */
+ private int[] permutation;
+ /** Rank of the jacobian matrix. */
+ private int rank;
+ /** Levenberg-Marquardt parameter. */
+ private double lmPar;
+ /** Parameters evolution direction associated with lmPar. */
+ private double[] lmDir;
+ /** Positive input variable used in determining the initial step bound. */
+ private final double initialStepBoundFactor;
+ /** Desired relative error in the sum of squares. */
+ private final double costRelativeTolerance;
+ /** Desired relative error in the approximate solution parameters. */
+ private final double parRelativeTolerance;
+ /** Desired max cosine on the orthogonality between the function vector
+ * and the columns of the jacobian. */
+ private final double orthoTolerance;
+ /** Threshold for QR ranking. */
+ private final double qrRankingThreshold;
+ /** Weighted residuals. */
+ private double[] weightedResidual;
+ /** Weighted Jacobian. */
+ private double[][] weightedJacobian;
+
+ /**
+ * Build an optimizer for least squares problems with default values
+ * for all the tuning parameters (see the {@link
+ * #LevenbergMarquardtOptimizer(double,double,double,double,double)
+ * other constructor}).
+ * The default values for the algorithm settings are:
+ * <ul>
+ * <li>Initial step bound factor: 100</li>
+ * <li>Cost relative tolerance: 1e-10</li>
+ * <li>Parameters relative tolerance: 1e-10</li>
+ * <li>Orthogonality tolerance: 1e-10</li>
+ * <li>QR ranking threshold: {@link Precision#SAFE_MIN}</li>
+ * </ul>
+ */
+ public LevenbergMarquardtOptimizer() {
+ this(100, 1e-10, 1e-10, 1e-10, Precision.SAFE_MIN);
+ }
+
+ /**
+ * Constructor that allows the specification of a custom convergence
+ * checker.
+ * Note that all the usual convergence checks will be <em>disabled</em>.
+ * The default values for the algorithm settings are:
+ * <ul>
+ * <li>Initial step bound factor: 100</li>
+ * <li>Cost relative tolerance: 1e-10</li>
+ * <li>Parameters relative tolerance: 1e-10</li>
+ * <li>Orthogonality tolerance: 1e-10</li>
+ * <li>QR ranking threshold: {@link Precision#SAFE_MIN}</li>
+ * </ul>
+ *
+ * @param checker Convergence checker.
+ */
+ public LevenbergMarquardtOptimizer(ConvergenceChecker<PointVectorValuePair> checker) {
+ this(100, checker, 1e-10, 1e-10, 1e-10, Precision.SAFE_MIN);
+ }
+
+ /**
+ * Constructor that allows the specification of a custom convergence
+ * checker, in addition to the standard ones.
+ *
+ * @param initialStepBoundFactor Positive input variable used in
+ * determining the initial step bound. This bound is set to the
+ * product of initialStepBoundFactor and the Euclidean norm of
+ * {@code diag * x} if non-zero, or else to {@code initialStepBoundFactor}
+ * itself. In most cases factor should lie in the interval
+ * {@code (0.1, 100.0)}. {@code 100} is a generally recommended value.
+ * @param checker Convergence checker.
+ * @param costRelativeTolerance Desired relative error in the sum of
+ * squares.
+ * @param parRelativeTolerance Desired relative error in the approximate
+ * solution parameters.
+ * @param orthoTolerance Desired max cosine on the orthogonality between
+ * the function vector and the columns of the Jacobian.
+ * @param threshold Desired threshold for QR ranking. If the squared norm
+ * of a column vector is smaller or equal to this threshold during QR
+ * decomposition, it is considered to be a zero vector and hence the rank
+ * of the matrix is reduced.
+ */
+ public LevenbergMarquardtOptimizer(double initialStepBoundFactor,
+ ConvergenceChecker<PointVectorValuePair> checker,
+ double costRelativeTolerance,
+ double parRelativeTolerance,
+ double orthoTolerance,
+ double threshold) {
+ super(checker);
+ this.initialStepBoundFactor = initialStepBoundFactor;
+ this.costRelativeTolerance = costRelativeTolerance;
+ this.parRelativeTolerance = parRelativeTolerance;
+ this.orthoTolerance = orthoTolerance;
+ this.qrRankingThreshold = threshold;
+ }
+
+ /**
+ * Build an optimizer for least squares problems with default values
+ * for some of the tuning parameters (see the {@link
+ * #LevenbergMarquardtOptimizer(double,double,double,double,double)
+ * other constructor}).
+ * The default values for the algorithm settings are:
+ * <ul>
+ * <li>Initial step bound factor: 100</li>
+ * <li>QR ranking threshold: {@link Precision#SAFE_MIN}</li>
+ * </ul>
+ *
+ * @param costRelativeTolerance Desired relative error in the sum of
+ * squares.
+ * @param parRelativeTolerance Desired relative error in the approximate
+ * solution parameters.
+ * @param orthoTolerance Desired max cosine on the orthogonality between
+ * the function vector and the columns of the Jacobian.
+ */
+ public LevenbergMarquardtOptimizer(double costRelativeTolerance,
+ double parRelativeTolerance,
+ double orthoTolerance) {
+ this(100,
+ costRelativeTolerance, parRelativeTolerance, orthoTolerance,
+ Precision.SAFE_MIN);
+ }
+
+ /**
+ * The arguments control the behaviour of the default convergence checking
+ * procedure.
+ * Additional criteria can be defined through the setting of a {@link
+ * ConvergenceChecker}.
+ *
+ * @param initialStepBoundFactor Positive input variable used in
+ * determining the initial step bound. This bound is set to the
+ * product of initialStepBoundFactor and the Euclidean norm of
+ * {@code diag * x} if non-zero, or else to {@code initialStepBoundFactor}
+ * itself. In most cases factor should lie in the interval
+ * {@code (0.1, 100.0)}. {@code 100} is a generally recommended value.
+ * @param costRelativeTolerance Desired relative error in the sum of
+ * squares.
+ * @param parRelativeTolerance Desired relative error in the approximate
+ * solution parameters.
+ * @param orthoTolerance Desired max cosine on the orthogonality between
+ * the function vector and the columns of the Jacobian.
+ * @param threshold Desired threshold for QR ranking. If the squared norm
+ * of a column vector is smaller or equal to this threshold during QR
+ * decomposition, it is considered to be a zero vector and hence the rank
+ * of the matrix is reduced.
+ */
+ public LevenbergMarquardtOptimizer(double initialStepBoundFactor,
+ double costRelativeTolerance,
+ double parRelativeTolerance,
+ double orthoTolerance,
+ double threshold) {
+ super(null); // No custom convergence criterion.
+ this.initialStepBoundFactor = initialStepBoundFactor;
+ this.costRelativeTolerance = costRelativeTolerance;
+ this.parRelativeTolerance = parRelativeTolerance;
+ this.orthoTolerance = orthoTolerance;
+ this.qrRankingThreshold = threshold;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ protected PointVectorValuePair doOptimize() {
+ final int nR = getTarget().length; // Number of observed data.
+ final double[] currentPoint = getStartPoint();
+ final int nC = currentPoint.length; // Number of parameters.
+
+ // arrays shared with the other private methods
+ solvedCols = FastMath.min(nR, nC);
+ diagR = new double[nC];
+ jacNorm = new double[nC];
+ beta = new double[nC];
+ permutation = new int[nC];
+ lmDir = new double[nC];
+
+ // local point
+ double delta = 0;
+ double xNorm = 0;
+ double[] diag = new double[nC];
+ double[] oldX = new double[nC];
+ double[] oldRes = new double[nR];
+ double[] oldObj = new double[nR];
+ double[] qtf = new double[nR];
+ double[] work1 = new double[nC];
+ double[] work2 = new double[nC];
+ double[] work3 = new double[nC];
+
+ final RealMatrix weightMatrixSqrt = getWeightSquareRoot();
+
+ // Evaluate the function at the starting point and calculate its norm.
+ double[] currentObjective = computeObjectiveValue(currentPoint);
+ double[] currentResiduals = computeResiduals(currentObjective);
+ PointVectorValuePair current = new PointVectorValuePair(currentPoint, currentObjective);
+ double currentCost = computeCost(currentResiduals);
+
+ // Outer loop.
+ lmPar = 0;
+ boolean firstIteration = true;
+ int iter = 0;
+ final ConvergenceChecker<PointVectorValuePair> checker = getConvergenceChecker();
+ while (true) {
+ ++iter;
+ final PointVectorValuePair previous = current;
+
+ // QR decomposition of the jacobian matrix
+ qrDecomposition(computeWeightedJacobian(currentPoint));
+
+ weightedResidual = weightMatrixSqrt.operate(currentResiduals);
+ for (int i = 0; i < nR; i++) {
+ qtf[i] = weightedResidual[i];
+ }
+
+ // compute Qt.res
+ qTy(qtf);
+
+ // now we don't need Q anymore,
+ // so let jacobian contain the R matrix with its diagonal elements
+ for (int k = 0; k < solvedCols; ++k) {
+ int pk = permutation[k];
+ weightedJacobian[k][pk] = diagR[pk];
+ }
+
+ if (firstIteration) {
+ // scale the point according to the norms of the columns
+ // of the initial jacobian
+ xNorm = 0;
+ for (int k = 0; k < nC; ++k) {
+ double dk = jacNorm[k];
+ if (dk == 0) {
+ dk = 1.0;
+ }
+ double xk = dk * currentPoint[k];
+ xNorm += xk * xk;
+ diag[k] = dk;
+ }
+ xNorm = FastMath.sqrt(xNorm);
+
+ // initialize the step bound delta
+ delta = (xNorm == 0) ? initialStepBoundFactor : (initialStepBoundFactor * xNorm);
+ }
+
+ // check orthogonality between function vector and jacobian columns
+ double maxCosine = 0;
+ if (currentCost != 0) {
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double s = jacNorm[pj];
+ if (s != 0) {
+ double sum = 0;
+ for (int i = 0; i <= j; ++i) {
+ sum += weightedJacobian[i][pj] * qtf[i];
+ }
+ maxCosine = FastMath.max(maxCosine, FastMath.abs(sum) / (s * currentCost));
+ }
+ }
+ }
+ if (maxCosine <= orthoTolerance) {
+ // Convergence has been reached.
+ setCost(currentCost);
+ // Update (deprecated) "point" field.
+ point = current.getPoint();
+ return current;
+ }
+
+ // rescale if necessary
+ for (int j = 0; j < nC; ++j) {
+ diag[j] = FastMath.max(diag[j], jacNorm[j]);
+ }
+
+ // Inner loop.
+ for (double ratio = 0; ratio < 1.0e-4;) {
+
+ // save the state
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ oldX[pj] = currentPoint[pj];
+ }
+ final double previousCost = currentCost;
+ double[] tmpVec = weightedResidual;
+ weightedResidual = oldRes;
+ oldRes = tmpVec;
+ tmpVec = currentObjective;
+ currentObjective = oldObj;
+ oldObj = tmpVec;
+
+ // determine the Levenberg-Marquardt parameter
+ determineLMParameter(qtf, delta, diag, work1, work2, work3);
+
+ // compute the new point and the norm of the evolution direction
+ double lmNorm = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ lmDir[pj] = -lmDir[pj];
+ currentPoint[pj] = oldX[pj] + lmDir[pj];
+ double s = diag[pj] * lmDir[pj];
+ lmNorm += s * s;
+ }
+ lmNorm = FastMath.sqrt(lmNorm);
+ // on the first iteration, adjust the initial step bound.
+ if (firstIteration) {
+ delta = FastMath.min(delta, lmNorm);
+ }
+
+ // Evaluate the function at x + p and calculate its norm.
+ currentObjective = computeObjectiveValue(currentPoint);
+ currentResiduals = computeResiduals(currentObjective);
+ current = new PointVectorValuePair(currentPoint, currentObjective);
+ currentCost = computeCost(currentResiduals);
+
+ // compute the scaled actual reduction
+ double actRed = -1.0;
+ if (0.1 * currentCost < previousCost) {
+ double r = currentCost / previousCost;
+ actRed = 1.0 - r * r;
+ }
+
+ // compute the scaled predicted reduction
+ // and the scaled directional derivative
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double dirJ = lmDir[pj];
+ work1[j] = 0;
+ for (int i = 0; i <= j; ++i) {
+ work1[i] += weightedJacobian[i][pj] * dirJ;
+ }
+ }
+ double coeff1 = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ coeff1 += work1[j] * work1[j];
+ }
+ double pc2 = previousCost * previousCost;
+ coeff1 /= pc2;
+ double coeff2 = lmPar * lmNorm * lmNorm / pc2;
+ double preRed = coeff1 + 2 * coeff2;
+ double dirDer = -(coeff1 + coeff2);
+
+ // ratio of the actual to the predicted reduction
+ ratio = (preRed == 0) ? 0 : (actRed / preRed);
+
+ // update the step bound
+ if (ratio <= 0.25) {
+ double tmp =
+ (actRed < 0) ? (0.5 * dirDer / (dirDer + 0.5 * actRed)) : 0.5;
+ if ((0.1 * currentCost >= previousCost) || (tmp < 0.1)) {
+ tmp = 0.1;
+ }
+ delta = tmp * FastMath.min(delta, 10.0 * lmNorm);
+ lmPar /= tmp;
+ } else if ((lmPar == 0) || (ratio >= 0.75)) {
+ delta = 2 * lmNorm;
+ lmPar *= 0.5;
+ }
+
+ // test for successful iteration.
+ if (ratio >= 1.0e-4) {
+ // successful iteration, update the norm
+ firstIteration = false;
+ xNorm = 0;
+ for (int k = 0; k < nC; ++k) {
+ double xK = diag[k] * currentPoint[k];
+ xNorm += xK * xK;
+ }
+ xNorm = FastMath.sqrt(xNorm);
+
+ // tests for convergence.
+ if (checker != null && checker.converged(iter, previous, current)) {
+ setCost(currentCost);
+ // Update (deprecated) "point" field.
+ point = current.getPoint();
+ return current;
+ }
+ } else {
+ // failed iteration, reset the previous values
+ currentCost = previousCost;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ currentPoint[pj] = oldX[pj];
+ }
+ tmpVec = weightedResidual;
+ weightedResidual = oldRes;
+ oldRes = tmpVec;
+ tmpVec = currentObjective;
+ currentObjective = oldObj;
+ oldObj = tmpVec;
+ // Reset "current" to previous values.
+ current = new PointVectorValuePair(currentPoint, currentObjective);
+ }
+
+ // Default convergence criteria.
+ if ((FastMath.abs(actRed) <= costRelativeTolerance &&
+ preRed <= costRelativeTolerance &&
+ ratio <= 2.0) ||
+ delta <= parRelativeTolerance * xNorm) {
+ setCost(currentCost);
+ // Update (deprecated) "point" field.
+ point = current.getPoint();
+ return current;
+ }
+
+ // tests for termination and stringent tolerances
+ // (2.2204e-16 is the machine epsilon for IEEE754)
+ if ((FastMath.abs(actRed) <= 2.2204e-16) && (preRed <= 2.2204e-16) && (ratio <= 2.0)) {
+ throw new ConvergenceException(LocalizedFormats.TOO_SMALL_COST_RELATIVE_TOLERANCE,
+ costRelativeTolerance);
+ } else if (delta <= 2.2204e-16 * xNorm) {
+ throw new ConvergenceException(LocalizedFormats.TOO_SMALL_PARAMETERS_RELATIVE_TOLERANCE,
+ parRelativeTolerance);
+ } else if (maxCosine <= 2.2204e-16) {
+ throw new ConvergenceException(LocalizedFormats.TOO_SMALL_ORTHOGONALITY_TOLERANCE,
+ orthoTolerance);
+ }
+ }
+ }
+ }
+
+ /**
+ * Determine the Levenberg-Marquardt parameter.
+ * <p>This implementation is a translation in Java of the MINPACK
+ * <a href="http://www.netlib.org/minpack/lmpar.f">lmpar</a>
+ * routine.</p>
+ * <p>This method sets the lmPar and lmDir attributes.</p>
+ * <p>The authors of the original fortran function are:</p>
+ * <ul>
+ * <li>Argonne National Laboratory. MINPACK project. March 1980</li>
+ * <li>Burton S. Garbow</li>
+ * <li>Kenneth E. Hillstrom</li>
+ * <li>Jorge J. More</li>
+ * </ul>
+ * <p>Luc Maisonobe did the Java translation.</p>
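+ * <p>In MINPACK terms (a sketch adapted from the lmpar documentation,
+ * with J the weighted Jacobian and r the residuals), given a positive
+ * delta, the routine determines par &ge; 0 such that if x solves</p>
+ * <pre>
+ * J.x = r ,     sqrt(par).D.x = 0
+ * </pre>
+ * <p>in the least squares sense, then either par is zero and
+ * ||D.x|| - delta &le; 0.1 delta, or par is positive and
+ * abs(||D.x|| - delta) &le; 0.1 delta.</p>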
+ *
+ * @param qy array containing qTy
+ * @param delta upper bound on the euclidean norm of diagR * lmDir
+ * @param diag diagonal matrix
+ * @param work1 work array
+ * @param work2 work array
+ * @param work3 work array
+ */
+ private void determineLMParameter(double[] qy, double delta, double[] diag,
+ double[] work1, double[] work2, double[] work3) {
+ final int nC = weightedJacobian[0].length;
+
+ // compute and store in x the gauss-newton direction, if the
+ // jacobian is rank-deficient, obtain a least squares solution
+ for (int j = 0; j < rank; ++j) {
+ lmDir[permutation[j]] = qy[j];
+ }
+ for (int j = rank; j < nC; ++j) {
+ lmDir[permutation[j]] = 0;
+ }
+ for (int k = rank - 1; k >= 0; --k) {
+ int pk = permutation[k];
+ double ypk = lmDir[pk] / diagR[pk];
+ for (int i = 0; i < k; ++i) {
+ lmDir[permutation[i]] -= ypk * weightedJacobian[i][pk];
+ }
+ lmDir[pk] = ypk;
+ }
+
+ // evaluate the function at the origin, and test
+ // for acceptance of the Gauss-Newton direction
+ double dxNorm = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double s = diag[pj] * lmDir[pj];
+ work1[pj] = s;
+ dxNorm += s * s;
+ }
+ dxNorm = FastMath.sqrt(dxNorm);
+ double fp = dxNorm - delta;
+ if (fp <= 0.1 * delta) {
+ lmPar = 0;
+ return;
+ }
+
+ // if the jacobian is not rank deficient, the Newton step provides
+ // a lower bound, parl, for the zero of the function,
+ // otherwise set this bound to zero
+ double sum2;
+ double parl = 0;
+ if (rank == solvedCols) {
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ work1[pj] *= diag[pj] / dxNorm;
+ }
+ sum2 = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double sum = 0;
+ for (int i = 0; i < j; ++i) {
+ sum += weightedJacobian[i][pj] * work1[permutation[i]];
+ }
+ double s = (work1[pj] - sum) / diagR[pj];
+ work1[pj] = s;
+ sum2 += s * s;
+ }
+ parl = fp / (delta * sum2);
+ }
+
+ // calculate an upper bound, paru, for the zero of the function
+ sum2 = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double sum = 0;
+ for (int i = 0; i <= j; ++i) {
+ sum += weightedJacobian[i][pj] * qy[i];
+ }
+ sum /= diag[pj];
+ sum2 += sum * sum;
+ }
+ double gNorm = FastMath.sqrt(sum2);
+ double paru = gNorm / delta;
+ if (paru == 0) {
+ // 2.2251e-308 is the smallest positive normalized double for IEEE754
+ paru = 2.2251e-308 / FastMath.min(delta, 0.1);
+ }
+
+ // if the input par lies outside of the interval (parl,paru),
+ // set par to the closer endpoint
+ lmPar = FastMath.min(paru, FastMath.max(lmPar, parl));
+ if (lmPar == 0) {
+ lmPar = gNorm / dxNorm;
+ }
+
+ for (int countdown = 10; countdown >= 0; --countdown) {
+
+ // evaluate the function at the current value of lmPar
+ if (lmPar == 0) {
+ lmPar = FastMath.max(2.2251e-308, 0.001 * paru);
+ }
+ double sPar = FastMath.sqrt(lmPar);
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ work1[pj] = sPar * diag[pj];
+ }
+ determineLMDirection(qy, work1, work2, work3);
+
+ dxNorm = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double s = diag[pj] * lmDir[pj];
+ work3[pj] = s;
+ dxNorm += s * s;
+ }
+ dxNorm = FastMath.sqrt(dxNorm);
+ double previousFP = fp;
+ fp = dxNorm - delta;
+
+ // if the function is small enough, accept the current value
+ // of lmPar, also test for the exceptional cases where parl is zero
+ if ((FastMath.abs(fp) <= 0.1 * delta) ||
+ ((parl == 0) && (fp <= previousFP) && (previousFP < 0))) {
+ return;
+ }
+
+ // compute the Newton correction
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ work1[pj] = work3[pj] * diag[pj] / dxNorm;
+ }
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ work1[pj] /= work2[j];
+ double tmp = work1[pj];
+ for (int i = j + 1; i < solvedCols; ++i) {
+ work1[permutation[i]] -= weightedJacobian[i][pj] * tmp;
+ }
+ }
+ sum2 = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ double s = work1[permutation[j]];
+ sum2 += s * s;
+ }
+ double correction = fp / (delta * sum2);
+
+ // depending on the sign of the function, update parl or paru.
+ if (fp > 0) {
+ parl = FastMath.max(parl, lmPar);
+ } else if (fp < 0) {
+ paru = FastMath.min(paru, lmPar);
+ }
+
+ // compute an improved estimate for lmPar
+ lmPar = FastMath.max(parl, lmPar + correction);
+
+ }
+ }
+
+ /**
+ * Solve a*x = b and d*x = 0 in the least squares sense.
+ * <p>This implementation is a translation in Java of the MINPACK
+ * <a href="http://www.netlib.org/minpack/qrsolv.f">qrsolv</a>
+ * routine.</p>
+ * <p>This method sets the lmDir and lmDiag attributes.</p>
+ * <p>The authors of the original fortran function are:</p>
+ * <ul>
+ * <li>Argonne National Laboratory. MINPACK project. March 1980</li>
+ * <li>Burton S. Garbow</li>
+ * <li>Kenneth E. Hillstrom</li>
+ * <li>Jorge J. More</li>
+ * </ul>
+ * <p>Luc Maisonobe did the Java translation.</p>
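+ * <p>Concretely (a sketch following the qrsolv documentation), given the
+ * QR factorization a.p = q.r, the method computes the z such that</p>
+ * <pre>
+ * r.z = q<sup>t</sup>.b ,     p<sup>t</sup>.d.p.z = 0
+ * </pre>
+ * <p>holds in the least squares sense, then permutes z back into lmDir
+ * (x = p.z).</p>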
+ *
+ * @param qy array containing qTy
+ * @param diag diagonal matrix
+ * @param lmDiag diagonal elements associated with lmDir
+ * @param work work array
+ */
+ private void determineLMDirection(double[] qy, double[] diag,
+ double[] lmDiag, double[] work) {
+
+ // copy R and Qty to preserve input and initialize s
+ // in particular, save the diagonal elements of R in lmDir
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ for (int i = j + 1; i < solvedCols; ++i) {
+ weightedJacobian[i][pj] = weightedJacobian[j][permutation[i]];
+ }
+ lmDir[j] = diagR[pj];
+ work[j] = qy[j];
+ }
+
+ // eliminate the diagonal matrix d using a Givens rotation
+ for (int j = 0; j < solvedCols; ++j) {
+
+ // prepare the row of d to be eliminated, locating the
+ // diagonal element using p from the Q.R. factorization
+ int pj = permutation[j];
+ double dpj = diag[pj];
+ if (dpj != 0) {
+ Arrays.fill(lmDiag, j + 1, lmDiag.length, 0);
+ }
+ lmDiag[j] = dpj;
+
+ // the transformations to eliminate the row of d
+ // modify only a single element of Qty
+ // beyond the first n, which is initially zero.
+ double qtbpj = 0;
+ for (int k = j; k < solvedCols; ++k) {
+ int pk = permutation[k];
+
+ // determine a Givens rotation which eliminates the
+ // appropriate element in the current row of d
+ if (lmDiag[k] != 0) {
+
+ final double sin;
+ final double cos;
+ double rkk = weightedJacobian[k][pk];
+ if (FastMath.abs(rkk) < FastMath.abs(lmDiag[k])) {
+ final double cotan = rkk / lmDiag[k];
+ sin = 1.0 / FastMath.sqrt(1.0 + cotan * cotan);
+ cos = sin * cotan;
+ } else {
+ final double tan = lmDiag[k] / rkk;
+ cos = 1.0 / FastMath.sqrt(1.0 + tan * tan);
+ sin = cos * tan;
+ }
+
+ // compute the modified diagonal element of R and
+ // the modified element of (Qty,0)
+ weightedJacobian[k][pk] = cos * rkk + sin * lmDiag[k];
+ final double temp = cos * work[k] + sin * qtbpj;
+ qtbpj = -sin * work[k] + cos * qtbpj;
+ work[k] = temp;
+
+ // accumulate the transformation in the row of s
+ for (int i = k + 1; i < solvedCols; ++i) {
+ double rik = weightedJacobian[i][pk];
+ final double temp2 = cos * rik + sin * lmDiag[i];
+ lmDiag[i] = -sin * rik + cos * lmDiag[i];
+ weightedJacobian[i][pk] = temp2;
+ }
+ }
+ }
+
+ // store the diagonal element of s and restore
+ // the corresponding diagonal element of R
+ lmDiag[j] = weightedJacobian[j][permutation[j]];
+ weightedJacobian[j][permutation[j]] = lmDir[j];
+ }
+
+ // solve the triangular system for z, if the system is
+ // singular, then obtain a least squares solution
+ int nSing = solvedCols;
+ for (int j = 0; j < solvedCols; ++j) {
+ if ((lmDiag[j] == 0) && (nSing == solvedCols)) {
+ nSing = j;
+ }
+ if (nSing < solvedCols) {
+ work[j] = 0;
+ }
+ }
+ if (nSing > 0) {
+ for (int j = nSing - 1; j >= 0; --j) {
+ int pj = permutation[j];
+ double sum = 0;
+ for (int i = j + 1; i < nSing; ++i) {
+ sum += weightedJacobian[i][pj] * work[i];
+ }
+ work[j] = (work[j] - sum) / lmDiag[j];
+ }
+ }
+
+ // permute the components of z back to components of lmDir
+ for (int j = 0; j < lmDir.length; ++j) {
+ lmDir[permutation[j]] = work[j];
+ }
+ }
+
+ /**
+ * Decompose a matrix A as A.P = Q.R using Householder transforms.
+ * <p>As suggested in the P. Lascaux and R. Theodor book
+ * <i>Analyse num&eacute;rique matricielle appliqu&eacute;e &agrave;
+ * l'art de l'ing&eacute;nieur</i> (Masson, 1986), instead of representing
+ * the Householder transforms with u<sub>k</sub> unit vectors such that:
+ * <pre>
+ * H<sub>k</sub> = I - 2u<sub>k</sub>.u<sub>k</sub><sup>t</sup>
+ * </pre>
+ * we use v<sub>k</sub> non-unit vectors such that:
+ * <pre>
+ * H<sub>k</sub> = I - beta<sub>k</sub>v<sub>k</sub>.v<sub>k</sub><sup>t</sup>
+ * </pre>
+ * where v<sub>k</sub> = a<sub>k</sub> - alpha<sub>k</sub> e<sub>k</sub>.
+ * The beta<sub>k</sub> coefficients are provided upon exit as recomputing
+ * them from the v<sub>k</sub> vectors would be costly.</p>
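+ * <p>With this choice, both coefficients follow directly from the column
+ * being processed (this is what the code below computes):</p>
+ * <pre>
+ * alpha<sub>k</sub> = -sign(a<sub>kk</sub>) ||a<sub>k</sub>||
+ * beta<sub>k</sub>  = 1 / (||a<sub>k</sub>||<sup>2</sup> - a<sub>kk</sub> alpha<sub>k</sub>)
+ * </pre>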
+ * <p>This decomposition handles rank deficient cases since the transformations
+ * are performed in non-increasing column norm order thanks to column
+ * pivoting. The diagonal elements of the R matrix are therefore also in
+ * non-increasing absolute value order.</p>
+ *
+ * @param jacobian Weighted Jacobian matrix at the current point.
+ * @exception ConvergenceException if the decomposition cannot be performed
+ */
+ private void qrDecomposition(RealMatrix jacobian) throws ConvergenceException {
+ // Code in this class assumes that the weighted Jacobian is -(W^(1/2) J),
+ // hence the multiplication by -1.
+ weightedJacobian = jacobian.scalarMultiply(-1).getData();
+
+ final int nR = weightedJacobian.length;
+ final int nC = weightedJacobian[0].length;
+
+ // initializations
+ for (int k = 0; k < nC; ++k) {
+ permutation[k] = k;
+ double norm2 = 0;
+ for (int i = 0; i < nR; ++i) {
+ double akk = weightedJacobian[i][k];
+ norm2 += akk * akk;
+ }
+ jacNorm[k] = FastMath.sqrt(norm2);
+ }
+
+ // transform the matrix column after column
+ for (int k = 0; k < nC; ++k) {
+
+ // select the column with the greatest norm on active components
+ int nextColumn = -1;
+ double ak2 = Double.NEGATIVE_INFINITY;
+ for (int i = k; i < nC; ++i) {
+ double norm2 = 0;
+ for (int j = k; j < nR; ++j) {
+ double aki = weightedJacobian[j][permutation[i]];
+ norm2 += aki * aki;
+ }
+ if (Double.isInfinite(norm2) || Double.isNaN(norm2)) {
+ throw new ConvergenceException(LocalizedFormats.UNABLE_TO_PERFORM_QR_DECOMPOSITION_ON_JACOBIAN,
+ nR, nC);
+ }
+ if (norm2 > ak2) {
+ nextColumn = i;
+ ak2 = norm2;
+ }
+ }
+ if (ak2 <= qrRankingThreshold) {
+ rank = k;
+ return;
+ }
+ int pk = permutation[nextColumn];
+ permutation[nextColumn] = permutation[k];
+ permutation[k] = pk;
+
+ // choose alpha such that Hk.u = alpha ek
+ double akk = weightedJacobian[k][pk];
+ double alpha = (akk > 0) ? -FastMath.sqrt(ak2) : FastMath.sqrt(ak2);
+ double betak = 1.0 / (ak2 - akk * alpha);
+ beta[pk] = betak;
+
+ // transform the current column
+ diagR[pk] = alpha;
+ weightedJacobian[k][pk] -= alpha;
+
+ // transform the remaining columns
+ for (int dk = nC - 1 - k; dk > 0; --dk) {
+ double gamma = 0;
+ for (int j = k; j < nR; ++j) {
+ gamma += weightedJacobian[j][pk] * weightedJacobian[j][permutation[k + dk]];
+ }
+ gamma *= betak;
+ for (int j = k; j < nR; ++j) {
+ weightedJacobian[j][permutation[k + dk]] -= gamma * weightedJacobian[j][pk];
+ }
+ }
+ }
+ rank = solvedCols;
+ }
+
+ /**
+ * Compute the product Qt.y for some Q.R. decomposition.
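+ * <p>The product is obtained by applying in sequence the Householder
+ * reflectors stored by {@link #qrDecomposition(RealMatrix)}:</p>
+ * <pre>
+ * y &larr; (I - beta<sub>k</sub> v<sub>k</sub>.v<sub>k</sub><sup>t</sup>).y     for k = 0 ... nC-1
+ * </pre>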
+ *
+ * @param y vector to multiply (will be overwritten with the result)
+ */
+ private void qTy(double[] y) {
+ final int nR = weightedJacobian.length;
+ final int nC = weightedJacobian[0].length;
+
+ for (int k = 0; k < nC; ++k) {
+ int pk = permutation[k];
+ double gamma = 0;
+ for (int i = k; i < nR; ++i) {
+ gamma += weightedJacobian[i][pk] * y[i];
+ }
+ gamma *= beta[pk];
+ for (int i = k; i < nR; ++i) {
+ y[i] -= gamma * weightedJacobian[i][pk];
+ }
+ }
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optimization/general/NonLinearConjugateGradientOptimizer.java b/src/main/java/org/apache/commons/math3/optimization/general/NonLinearConjugateGradientOptimizer.java
new file mode 100644
index 0000000..ee16472
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optimization/general/NonLinearConjugateGradientOptimizer.java
@@ -0,0 +1,311 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optimization.general;
+
+import org.apache.commons.math3.exception.MathIllegalStateException;
+import org.apache.commons.math3.analysis.UnivariateFunction;
+import org.apache.commons.math3.analysis.solvers.BrentSolver;
+import org.apache.commons.math3.analysis.solvers.UnivariateSolver;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+import org.apache.commons.math3.optimization.GoalType;
+import org.apache.commons.math3.optimization.PointValuePair;
+import org.apache.commons.math3.optimization.SimpleValueChecker;
+import org.apache.commons.math3.optimization.ConvergenceChecker;
+import org.apache.commons.math3.util.FastMath;
+
+/**
+ * Non-linear conjugate gradient optimizer.
+ * <p>
+ * This class supports both the Fletcher-Reeves and the Polak-Ribi&egrave;re
+ * update formulas for the conjugate search directions. It also supports
+ * optional preconditioning.
+ * </p>
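+ * <p>
+ * For reference, denoting by r<sub>k</sub> the residual (opposite of the
+ * gradient when minimizing) and by s<sub>k</sub> the preconditioned
+ * residual at iteration k, the two update formulas as implemented in
+ * {@link #doOptimize()} read:
+ * </p>
+ * <pre>
+ * Fletcher-Reeves:  &beta;<sub>k</sub> = (r<sub>k</sub>.s<sub>k</sub>) / (r<sub>k-1</sub>.s<sub>k-1</sub>)
+ * Polak-Ribi&egrave;re: &beta;<sub>k</sub> = (r<sub>k</sub>.s<sub>k</sub> - r<sub>k</sub>.s<sub>k-1</sub>) / (r<sub>k-1</sub>.s<sub>k-1</sub>)
+ * </pre>
+ * <p>
+ * A minimal usage sketch (the thresholds, evaluation budget and start
+ * point are illustrative; {@code f} is assumed to be a
+ * {@code DifferentiableMultivariateFunction}):
+ * </p>
+ * <pre>
+ * NonLinearConjugateGradientOptimizer optimizer =
+ *     new NonLinearConjugateGradientOptimizer(ConjugateGradientFormula.POLAK_RIBIERE,
+ *                                             new SimpleValueChecker(1e-10, 1e-10));
+ * PointValuePair optimum =
+ *     optimizer.optimize(100, f, GoalType.MINIMIZE, new double[] { 0, 0 });
+ * </pre>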
+ *
+ * @deprecated As of 3.1 (to be removed in 4.0).
+ * @since 2.0
+ *
+ */
+@Deprecated
+public class NonLinearConjugateGradientOptimizer
+ extends AbstractScalarDifferentiableOptimizer {
+ /** Update formula for the beta parameter. */
+ private final ConjugateGradientFormula updateFormula;
+ /** Preconditioner (may be null). */
+ private final Preconditioner preconditioner;
+ /** Solver to use in the line search (may be null). */
+ private final UnivariateSolver solver;
+ /** Initial step used to bracket the optimum in line search. */
+ private double initialStep;
+ /** Current point. */
+ private double[] point;
+
+ /**
+ * Constructor with default {@link SimpleValueChecker checker},
+ * {@link BrentSolver line search solver} and
+ * {@link IdentityPreconditioner preconditioner}.
+ *
+ * @param updateFormula formula to use for updating the &beta; parameter,
+ * must be one of {@link ConjugateGradientFormula#FLETCHER_REEVES} or {@link
+ * ConjugateGradientFormula#POLAK_RIBIERE}.
+ * @deprecated See {@link SimpleValueChecker#SimpleValueChecker()}
+ */
+ @Deprecated
+ public NonLinearConjugateGradientOptimizer(final ConjugateGradientFormula updateFormula) {
+ this(updateFormula,
+ new SimpleValueChecker());
+ }
+
+ /**
+ * Constructor with default {@link BrentSolver line search solver} and
+ * {@link IdentityPreconditioner preconditioner}.
+ *
+ * @param updateFormula formula to use for updating the &beta; parameter,
+ * must be one of {@link ConjugateGradientFormula#FLETCHER_REEVES} or {@link
+ * ConjugateGradientFormula#POLAK_RIBIERE}.
+ * @param checker Convergence checker.
+ */
+ public NonLinearConjugateGradientOptimizer(final ConjugateGradientFormula updateFormula,
+ ConvergenceChecker<PointValuePair> checker) {
+ this(updateFormula,
+ checker,
+ new BrentSolver(),
+ new IdentityPreconditioner());
+ }
+
+ /**
+ * Constructor with default {@link IdentityPreconditioner preconditioner}.
+ *
+ * @param updateFormula formula to use for updating the &beta; parameter,
+ * must be one of {@link ConjugateGradientFormula#FLETCHER_REEVES} or {@link
+ * ConjugateGradientFormula#POLAK_RIBIERE}.
+ * @param checker Convergence checker.
+ * @param lineSearchSolver Solver to use during line search.
+ */
+ public NonLinearConjugateGradientOptimizer(final ConjugateGradientFormula updateFormula,
+ ConvergenceChecker<PointValuePair> checker,
+ final UnivariateSolver lineSearchSolver) {
+ this(updateFormula,
+ checker,
+ lineSearchSolver,
+ new IdentityPreconditioner());
+ }
+
+ /**
+ * @param updateFormula formula to use for updating the &beta; parameter,
+ * must be one of {@link ConjugateGradientFormula#FLETCHER_REEVES} or {@link
+ * ConjugateGradientFormula#POLAK_RIBIERE}.
+ * @param checker Convergence checker.
+ * @param lineSearchSolver Solver to use during line search.
+ * @param preconditioner Preconditioner.
+ */
+ public NonLinearConjugateGradientOptimizer(final ConjugateGradientFormula updateFormula,
+ ConvergenceChecker<PointValuePair> checker,
+ final UnivariateSolver lineSearchSolver,
+ final Preconditioner preconditioner) {
+ super(checker);
+
+ this.updateFormula = updateFormula;
+ solver = lineSearchSolver;
+ this.preconditioner = preconditioner;
+ initialStep = 1.0;
+ }
+
+ /**
+ * Set the initial step used to bracket the optimum in line search.
+ * <p>
+ * The initial step is a factor with respect to the search direction,
+ * which itself is roughly related to the gradient of the function.
+ * </p>
+ * @param initialStep initial step used to bracket the optimum in line search,
+ * if a non-positive value is used, the initial step is reset to its
+ * default value of 1.0
+ */
+ public void setInitialStep(final double initialStep) {
+ if (initialStep <= 0) {
+ this.initialStep = 1.0;
+ } else {
+ this.initialStep = initialStep;
+ }
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ protected PointValuePair doOptimize() {
+ final ConvergenceChecker<PointValuePair> checker = getConvergenceChecker();
+ point = getStartPoint();
+ final GoalType goal = getGoalType();
+ final int n = point.length;
+ double[] r = computeObjectiveGradient(point);
+ if (goal == GoalType.MINIMIZE) {
+ for (int i = 0; i < n; ++i) {
+ r[i] = -r[i];
+ }
+ }
+
+ // Initial search direction.
+ double[] steepestDescent = preconditioner.precondition(point, r);
+ double[] searchDirection = steepestDescent.clone();
+
+ double delta = 0;
+ for (int i = 0; i < n; ++i) {
+ delta += r[i] * searchDirection[i];
+ }
+
+ PointValuePair current = null;
+ int iter = 0;
+ int maxEval = getMaxEvaluations();
+ while (true) {
+ ++iter;
+
+ final double objective = computeObjectiveValue(point);
+ PointValuePair previous = current;
+ current = new PointValuePair(point, objective);
+ if (previous != null && checker.converged(iter, previous, current)) {
+ // We have found an optimum.
+ return current;
+ }
+
+ // Find the optimal step in the search direction.
+ final UnivariateFunction lsf = new LineSearchFunction(searchDirection);
+ final double uB = findUpperBound(lsf, 0, initialStep);
+ // XXX Last parameter is set to a value close to zero in order to
+ // work around the divergence problem in the "testCircleFitting"
+ // unit test (see MATH-439).
+ final double step = solver.solve(maxEval, lsf, 0, uB, 1e-15);
+ maxEval -= solver.getEvaluations(); // Subtract used up evaluations.
+
+ // Update the current point.
+ for (int i = 0; i < point.length; ++i) {
+ point[i] += step * searchDirection[i];
+ }
+
+ r = computeObjectiveGradient(point);
+ if (goal == GoalType.MINIMIZE) {
+ for (int i = 0; i < n; ++i) {
+ r[i] = -r[i];
+ }
+ }
+
+ // Compute beta.
+ final double deltaOld = delta;
+ final double[] newSteepestDescent = preconditioner.precondition(point, r);
+ delta = 0;
+ for (int i = 0; i < n; ++i) {
+ delta += r[i] * newSteepestDescent[i];
+ }
+
+ final double beta;
+ if (updateFormula == ConjugateGradientFormula.FLETCHER_REEVES) {
+ beta = delta / deltaOld;
+ } else {
+ double deltaMid = 0;
+ for (int i = 0; i < r.length; ++i) {
+ deltaMid += r[i] * steepestDescent[i];
+ }
+ beta = (delta - deltaMid) / deltaOld;
+ }
+ steepestDescent = newSteepestDescent;
+
+ // Compute conjugate search direction.
+ if (iter % n == 0 ||
+ beta < 0) {
+ // Break conjugation: reset search direction.
+ searchDirection = steepestDescent.clone();
+ } else {
+ // Compute new conjugate search direction.
+ for (int i = 0; i < n; ++i) {
+ searchDirection[i] = steepestDescent[i] + beta * searchDirection[i];
+ }
+ }
+ }
+ }
+
+ /**
+ * Find the upper bound b ensuring bracketing of a root between a and b.
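+ * <p>The trial step is inflated on each iteration, growing at least
+ * geometrically (factor 2, and faster when |f(a)| dominates the current
+ * |f(b)|), until the sign of f changes.</p>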
+ *
+ * @param f function whose root must be bracketed.
+ * @param a lower bound of the interval.
+ * @param h initial step to try.
+ * @return b such that f(a) and f(b) have opposite signs.
+ * @throws MathIllegalStateException if no bracket can be found.
+ */
+ private double findUpperBound(final UnivariateFunction f,
+ final double a, final double h) {
+ final double yA = f.value(a);
+ double yB = yA;
+ for (double step = h; step < Double.MAX_VALUE; step *= FastMath.max(2, yA / yB)) {
+ final double b = a + step;
+ yB = f.value(b);
+ if (yA * yB <= 0) {
+ return b;
+ }
+ }
+ throw new MathIllegalStateException(LocalizedFormats.UNABLE_TO_BRACKET_OPTIMUM_IN_LINE_SEARCH);
+ }
+
+ /** Default identity preconditioner. */
+ public static class IdentityPreconditioner implements Preconditioner {
+
+ /** {@inheritDoc} */
+ public double[] precondition(double[] variables, double[] r) {
+ return r.clone();
+ }
+ }
+
+ /** Internal class for line search.
+ * <p>
+ * The function represented by this class is the dot product of
+ * the objective function gradient and the search direction. Its
+ * value is zero when the gradient is orthogonal to the search
+ * direction, i.e. when the objective function value is a local
+ * extremum along the search direction.
+ * </p>
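+ * <p>Equivalently, for a shift x along the search direction d from the
+ * current point p:</p>
+ * <pre>
+ * value(x) = &nabla;f(p + x.d) &middot; d
+ * </pre>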
+ */
+ private class LineSearchFunction implements UnivariateFunction {
+ /** Search direction. */
+ private final double[] searchDirection;
+
+ /** Simple constructor.
+ * @param searchDirection search direction
+ */
+ LineSearchFunction(final double[] searchDirection) {
+ this.searchDirection = searchDirection;
+ }
+
+ /** {@inheritDoc} */
+ public double value(double x) {
+ // current point in the search direction
+ final double[] shiftedPoint = point.clone();
+ for (int i = 0; i < shiftedPoint.length; ++i) {
+ shiftedPoint[i] += x * searchDirection[i];
+ }
+
+ // gradient of the objective function
+ final double[] gradient = computeObjectiveGradient(shiftedPoint);
+
+ // dot product with the search direction
+ double dotProduct = 0;
+ for (int i = 0; i < gradient.length; ++i) {
+ dotProduct += gradient[i] * searchDirection[i];
+ }
+
+ return dotProduct;
+ }
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/optimization/general/Preconditioner.java b/src/main/java/org/apache/commons/math3/optimization/general/Preconditioner.java
new file mode 100644
index 0000000..7142e76
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optimization/general/Preconditioner.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.commons.math3.optimization.general;
+
+/**
+ * This interface represents a preconditioner for differentiable scalar
+ * objective function optimizers.
+ * @deprecated As of 3.1 (to be removed in 4.0).
+ * @since 2.0
+ */
+@Deprecated
+public interface Preconditioner {
+ /**
+ * Precondition a search direction.
+ * <p>
+ * The returned preconditioned search direction must be computed fast or
+ * the algorithm performance will drop drastically. A classical approach
+ * is to compute only the diagonal elements of the hessian and to divide
+ * the raw search direction by these elements if they are all positive.
+ * If at least one of them is negative, it is safer to return a clone of
+ * the raw search direction as if the hessian were the identity matrix. The
+ * rationale for this simplified choice is that a negative diagonal element
+ * means the current point is far from the optimum and preconditioning will
+ * not be efficient anyway in this case.
+ * </p>
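+ * <p>A minimal sketch of this diagonal approach (the
+ * {@code hessianDiagonal} helper is a hypothetical user-supplied
+ * function, not part of this interface):</p>
+ * <pre>
+ * public double[] precondition(double[] point, double[] r) {
+ *     // hypothetical helper returning the diagonal of the hessian
+ *     final double[] h = hessianDiagonal(point);
+ *     final double[] out = r.clone();
+ *     for (int i = 0; i &lt; h.length; ++i) {
+ *         if (h[i] &lt;= 0) {
+ *             // negative (or null) curvature: fall back to identity
+ *             return r.clone();
+ *         }
+ *         out[i] = r[i] / h[i];
+ *     }
+ *     return out;
+ * }
+ * </pre>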
+ * @param point current point at which the search direction was computed
+ * @param r raw search direction (i.e. opposite of the gradient)
+ * @return approximation of H<sup>-1</sup>r where H is the objective function hessian
+ */
+ double[] precondition(double[] point, double[] r);
+}
diff --git a/src/main/java/org/apache/commons/math3/optimization/general/package-info.java b/src/main/java/org/apache/commons/math3/optimization/general/package-info.java
new file mode 100644
index 0000000..ba140ce
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/optimization/general/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *
+ * This package provides optimization algorithms that require derivatives.
+ *
+ */
+package org.apache.commons.math3.optimization.general;