Diffstat (limited to 'src/main/java/org/apache/commons/math3/fitting')
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/AbstractCurveFitter.java  141
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/CurveFitter.java  235
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/GaussianCurveFitter.java  425
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/GaussianFitter.java  362
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/HarmonicCurveFitter.java  410
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/HarmonicFitter.java  386
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/PolynomialCurveFitter.java  127
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/PolynomialFitter.java  71
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/SimpleCurveFitter.java  119
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/WeightedObservedPoint.java  81
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/WeightedObservedPoints.java  100
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/leastsquares/AbstractEvaluation.java  87
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/leastsquares/DenseWeightedEvaluation.java  68
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/leastsquares/EvaluationRmsChecker.java  75
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/leastsquares/GaussNewtonOptimizer.java  299
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresAdapter.java  77
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresBuilder.java  226
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresFactory.java  532
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresOptimizer.java  62
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresProblem.java  156
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/leastsquares/LevenbergMarquardtOptimizer.java  1042
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/leastsquares/MultivariateJacobianFunction.java  39
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/leastsquares/OptimumImpl.java  97
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/leastsquares/ParameterValidator.java  34
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/leastsquares/ValueAndJacobianFunction.java  44
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/leastsquares/package-info.java  39
-rw-r--r--  src/main/java/org/apache/commons/math3/fitting/package-info.java  25
27 files changed, 5359 insertions, 0 deletions
diff --git a/src/main/java/org/apache/commons/math3/fitting/AbstractCurveFitter.java b/src/main/java/org/apache/commons/math3/fitting/AbstractCurveFitter.java
new file mode 100644
index 0000000..c3f7239
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/AbstractCurveFitter.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting;
+
+import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.analysis.ParametricUnivariateFunction;
+import org.apache.commons.math3.fitting.leastsquares.LeastSquaresOptimizer;
+import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem;
+import org.apache.commons.math3.fitting.leastsquares.LevenbergMarquardtOptimizer;
+
+import java.util.Collection;
+
+/**
+ * Base class that contains common code for fitting parametric univariate real functions <code>
+ * y = f(p<sub>i</sub>;x)</code>, where {@code x} is the independent variable and the <code>
+ * p<sub>i</sub></code> are the <em>parameters</em>. <br>
+ * A fitter will find the optimal values of the parameters by <em>fitting</em> the curve so it
+ * remains very close to a set of {@code N} observed points <code>(x<sub>k</sub>, y<sub>k</sub>)
+ * </code>, {@code 0 <= k < N}. <br>
+ * An algorithm usually performs the fit by finding the parameter values that minimize the
+ * objective function
+ *
+ * <pre><code>
+ * &sum;(y<sub>k</sub> - f(x<sub>k</sub>))<sup>2</sup>,
+ * </code></pre>
+ *
+ * which is actually a least-squares problem. This class contains boilerplate code for calling the
+ * {@link #fit(Collection)} method for obtaining the parameters. The problem setup, such as the
+ * choice of optimization algorithm for fitting a specific function, is delegated to subclasses.
+ *
+ * @since 3.3
+ */
+public abstract class AbstractCurveFitter {
+ /**
+ * Fits a curve. This method computes the coefficients of the curve that best fit the sample of
+ * observed points.
+ *
+ * @param points Observations.
+ * @return the fitted parameters.
+ */
+ public double[] fit(Collection<WeightedObservedPoint> points) {
+ // Perform the fit.
+ return getOptimizer().optimize(getProblem(points)).getPoint().toArray();
+ }
+
+ /**
+ * Creates an optimizer set up to fit the appropriate curve.
+ *
+ * <p>The default implementation uses a {@link LevenbergMarquardtOptimizer Levenberg-Marquardt}
+ * optimizer.
+ *
+ * @return the optimizer to use for fitting the curve to the given {@code points}.
+ */
+ protected LeastSquaresOptimizer getOptimizer() {
+ return new LevenbergMarquardtOptimizer();
+ }
+
+ /**
+ * Creates a least squares problem corresponding to the appropriate curve.
+ *
+ * @param points Sample points.
+ * @return the least squares problem to use for fitting the curve to the given {@code points}.
+ */
+ protected abstract LeastSquaresProblem getProblem(Collection<WeightedObservedPoint> points);
+
+ /** Vector function for computing function theoretical values. */
+ protected static class TheoreticalValuesFunction {
+ /** Function to fit. */
+ private final ParametricUnivariateFunction f;
+
+ /** Observations. */
+ private final double[] points;
+
+ /**
+ * @param f function to fit.
+ * @param observations Observations.
+ */
+ public TheoreticalValuesFunction(
+ final ParametricUnivariateFunction f,
+ final Collection<WeightedObservedPoint> observations) {
+ this.f = f;
+
+ final int len = observations.size();
+ this.points = new double[len];
+ int i = 0;
+ for (WeightedObservedPoint obs : observations) {
+ this.points[i++] = obs.getX();
+ }
+ }
+
+ /**
+ * @return the model function values.
+ */
+ public MultivariateVectorFunction getModelFunction() {
+ return new MultivariateVectorFunction() {
+ /** {@inheritDoc} */
+ public double[] value(double[] p) {
+ final int len = points.length;
+ final double[] values = new double[len];
+ for (int i = 0; i < len; i++) {
+ values[i] = f.value(points[i], p);
+ }
+
+ return values;
+ }
+ };
+ }
+
+ /**
+ * @return the model function Jacobian.
+ */
+ public MultivariateMatrixFunction getModelFunctionJacobian() {
+ return new MultivariateMatrixFunction() {
+ /** {@inheritDoc} */
+ public double[][] value(double[] p) {
+ final int len = points.length;
+ final double[][] jacobian = new double[len][];
+ for (int i = 0; i < len; i++) {
+ jacobian[i] = f.gradient(points[i], p);
+ }
+ return jacobian;
+ }
+ };
+ }
+ }
+}
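
A concrete fitter only has to supply getProblem() (and may override getOptimizer()). As a hedged illustration of that contract, the following minimal, hypothetical subclass fits a straight line y = a + b*x using only the TheoreticalValuesFunction helper above and the LeastSquaresBuilder API used elsewhere in this commit. It is assumed to live in the same package and is a sketch, not part of the commit.

import java.util.Collection;
import org.apache.commons.math3.analysis.ParametricUnivariateFunction;
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresBuilder;
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem;
import org.apache.commons.math3.linear.DiagonalMatrix;

/** Hypothetical example fitter for y = a + b * x (illustration only). */
public class LineCurveFitter extends AbstractCurveFitter {
    /** Parametric model: p[0] + p[1] * x. */
    private static final ParametricUnivariateFunction LINE =
            new ParametricUnivariateFunction() {
                public double value(double x, double... p) {
                    return p[0] + p[1] * x;
                }
                public double[] gradient(double x, double... p) {
                    // Partial derivatives with respect to p[0] and p[1].
                    return new double[] { 1, x };
                }
            };

    @Override
    protected LeastSquaresProblem getProblem(Collection<WeightedObservedPoint> points) {
        final double[] target = new double[points.size()];
        final double[] weights = new double[points.size()];
        int i = 0;
        for (WeightedObservedPoint p : points) {
            target[i] = p.getY();
            weights[i] = p.getWeight();
            ++i;
        }
        final AbstractCurveFitter.TheoreticalValuesFunction model =
                new AbstractCurveFitter.TheoreticalValuesFunction(LINE, points);
        return new LeastSquaresBuilder()
                .maxEvaluations(Integer.MAX_VALUE)
                .maxIterations(Integer.MAX_VALUE)
                .start(new double[] { 0, 0 }) // crude initial guess for a and b
                .target(target)
                .weight(new DiagonalMatrix(weights))
                .model(model.getModelFunction(), model.getModelFunctionJacobian())
                .build();
    }
}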
diff --git a/src/main/java/org/apache/commons/math3/fitting/CurveFitter.java b/src/main/java/org/apache/commons/math3/fitting/CurveFitter.java
new file mode 100644
index 0000000..09dd7f2
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/CurveFitter.java
@@ -0,0 +1,235 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting;
+
+import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.analysis.ParametricUnivariateFunction;
+import org.apache.commons.math3.optim.InitialGuess;
+import org.apache.commons.math3.optim.MaxEval;
+import org.apache.commons.math3.optim.PointVectorValuePair;
+import org.apache.commons.math3.optim.nonlinear.vector.ModelFunction;
+import org.apache.commons.math3.optim.nonlinear.vector.ModelFunctionJacobian;
+import org.apache.commons.math3.optim.nonlinear.vector.MultivariateVectorOptimizer;
+import org.apache.commons.math3.optim.nonlinear.vector.Target;
+import org.apache.commons.math3.optim.nonlinear.vector.Weight;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Fitter for parametric univariate real functions y = f(x). <br>
+ * When a univariate real function y = f(x) depends on some unknown parameters p<sub>0</sub>,
+ * p<sub>1</sub> ... p<sub>n-1</sub>, this class can be used to find these parameters. It does this
+ * by <em>fitting</em> the curve so it remains very close to a set of observed points
+ * (x<sub>0</sub>, y<sub>0</sub>), (x<sub>1</sub>, y<sub>1</sub>) ... (x<sub>k-1</sub>,
+ * y<sub>k-1</sub>). This fitting is done by finding the parameter values that minimize the
+ * objective function &sum;(y<sub>i</sub>-f(x<sub>i</sub>))<sup>2</sup>. This is really a least
+ * squares problem.
+ *
+ * @param <T> Function to use for the fit.
+ * @since 2.0
+ * @deprecated As of 3.3. Please use {@link AbstractCurveFitter} and {@link WeightedObservedPoints}
+ * instead.
+ */
+@Deprecated
+public class CurveFitter<T extends ParametricUnivariateFunction> {
+ /** Optimizer to use for the fitting. */
+ private final MultivariateVectorOptimizer optimizer;
+
+ /** Observed points. */
+ private final List<WeightedObservedPoint> observations;
+
+ /**
+ * Simple constructor.
+ *
+ * @param optimizer Optimizer to use for the fitting.
+ * @since 3.1
+ */
+ public CurveFitter(final MultivariateVectorOptimizer optimizer) {
+ this.optimizer = optimizer;
+ observations = new ArrayList<WeightedObservedPoint>();
+ }
+
+ /**
+ * Add an observed (x,y) point to the sample with unit weight.
+ *
+ * <p>Calling this method is equivalent to calling {@code addObservedPoint(1.0, x, y)}.
+ *
+ * @param x abscissa of the point
+ * @param y observed value of the point at x, after fitting we should have f(x) as close as
+ * possible to this value
+ * @see #addObservedPoint(double, double, double)
+ * @see #addObservedPoint(WeightedObservedPoint)
+ * @see #getObservations()
+ */
+ public void addObservedPoint(double x, double y) {
+ addObservedPoint(1.0, x, y);
+ }
+
+ /**
+ * Add an observed weighted (x,y) point to the sample.
+ *
+ * @param weight weight of the observed point in the fit
+ * @param x abscissa of the point
+ * @param y observed value of the point at x, after fitting we should have f(x) as close as
+ * possible to this value
+ * @see #addObservedPoint(double, double)
+ * @see #addObservedPoint(WeightedObservedPoint)
+ * @see #getObservations()
+ */
+ public void addObservedPoint(double weight, double x, double y) {
+ observations.add(new WeightedObservedPoint(weight, x, y));
+ }
+
+ /**
+ * Add an observed weighted (x,y) point to the sample.
+ *
+ * @param observed observed point to add
+ * @see #addObservedPoint(double, double)
+ * @see #addObservedPoint(double, double, double)
+ * @see #getObservations()
+ */
+ public void addObservedPoint(WeightedObservedPoint observed) {
+ observations.add(observed);
+ }
+
+ /**
+ * Get the observed points.
+ *
+ * @return observed points
+ * @see #addObservedPoint(double, double)
+ * @see #addObservedPoint(double, double, double)
+ * @see #addObservedPoint(WeightedObservedPoint)
+ */
+ public WeightedObservedPoint[] getObservations() {
+ return observations.toArray(new WeightedObservedPoint[observations.size()]);
+ }
+
+ /** Remove all observations. */
+ public void clearObservations() {
+ observations.clear();
+ }
+
+ /**
+ * Fit a curve. This method computes the coefficients of the curve that best fit the sample of
+ * observed points previously given through calls to the {@link
+ * #addObservedPoint(WeightedObservedPoint) addObservedPoint} method.
+ *
+ * @param f parametric function to fit.
+ * @param initialGuess first guess of the function parameters.
+ * @return the fitted parameters.
+ * @throws org.apache.commons.math3.exception.DimensionMismatchException if the start point
+ * dimension is wrong.
+ */
+ public double[] fit(T f, final double[] initialGuess) {
+ return fit(Integer.MAX_VALUE, f, initialGuess);
+ }
+
+ /**
+ * Fit a curve. This method computes the coefficients of the curve that best fit the sample of
+ * observed points previously given through calls to the {@link
+ * #addObservedPoint(WeightedObservedPoint) addObservedPoint} method.
+ *
+ * @param f parametric function to fit.
+ * @param initialGuess first guess of the function parameters.
+ * @param maxEval Maximum number of function evaluations.
+ * @return the fitted parameters.
+ * @throws org.apache.commons.math3.exception.TooManyEvaluationsException if the number of
+ * allowed evaluations is exceeded.
+ * @throws org.apache.commons.math3.exception.DimensionMismatchException if the start point
+ * dimension is wrong.
+ * @since 3.0
+ */
+ public double[] fit(int maxEval, T f, final double[] initialGuess) {
+ // Prepare least squares problem.
+ double[] target = new double[observations.size()];
+ double[] weights = new double[observations.size()];
+ int i = 0;
+ for (WeightedObservedPoint point : observations) {
+ target[i] = point.getY();
+ weights[i] = point.getWeight();
+ ++i;
+ }
+
+ // Input to the optimizer: the model and its Jacobian.
+ final TheoreticalValuesFunction model = new TheoreticalValuesFunction(f);
+
+ // Perform the fit.
+ final PointVectorValuePair optimum =
+ optimizer.optimize(
+ new MaxEval(maxEval),
+ model.getModelFunction(),
+ model.getModelFunctionJacobian(),
+ new Target(target),
+ new Weight(weights),
+ new InitialGuess(initialGuess));
+ // Extract the coefficients.
+ return optimum.getPointRef();
+ }
+
+ /** Vectorial function computing function theoretical values. */
+ private class TheoreticalValuesFunction {
+ /** Function to fit. */
+ private final ParametricUnivariateFunction f;
+
+ /**
+ * @param f function to fit.
+ */
+ TheoreticalValuesFunction(final ParametricUnivariateFunction f) {
+ this.f = f;
+ }
+
+ /**
+ * @return the model function values.
+ */
+ public ModelFunction getModelFunction() {
+ return new ModelFunction(
+ new MultivariateVectorFunction() {
+ /** {@inheritDoc} */
+ public double[] value(double[] point) {
+ // compute the residuals
+ final double[] values = new double[observations.size()];
+ int i = 0;
+ for (WeightedObservedPoint observed : observations) {
+ values[i++] = f.value(observed.getX(), point);
+ }
+
+ return values;
+ }
+ });
+ }
+
+ /**
+ * @return the model function Jacobian.
+ */
+ public ModelFunctionJacobian getModelFunctionJacobian() {
+ return new ModelFunctionJacobian(
+ new MultivariateMatrixFunction() {
+ /** {@inheritDoc} */
+ public double[][] value(double[] point) {
+ final double[][] jacobian = new double[observations.size()][];
+ int i = 0;
+ for (WeightedObservedPoint observed : observations) {
+ jacobian[i++] = f.gradient(observed.getX(), point);
+ }
+ return jacobian;
+ }
+ });
+ }
+ }
+}
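
For comparison with the replacement API above, here is a minimal usage sketch of this deprecated fitter. It assumes the vector Levenberg-Marquardt implementation from the org.apache.commons.math3.optim.nonlinear.vector.jacobian package (not part of this diff) and the same hypothetical straight-line model; it is an illustration, not code from the commit.

import org.apache.commons.math3.analysis.ParametricUnivariateFunction;
import org.apache.commons.math3.optim.nonlinear.vector.jacobian.LevenbergMarquardtOptimizer;

// Sketch: fit y = a + b * x to three observed points.
CurveFitter<ParametricUnivariateFunction> fitter =
        new CurveFitter<ParametricUnivariateFunction>(new LevenbergMarquardtOptimizer());
fitter.addObservedPoint(1.0, 2.1);
fitter.addObservedPoint(2.0, 3.9);
fitter.addObservedPoint(3.0, 6.2);
double[] ab = fitter.fit(
        new ParametricUnivariateFunction() {
            public double value(double x, double... p) {
                return p[0] + p[1] * x;
            }
            public double[] gradient(double x, double... p) {
                return new double[] { 1, x };
            }
        },
        new double[] { 0, 0 }); // initial guess for a and b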
diff --git a/src/main/java/org/apache/commons/math3/fitting/GaussianCurveFitter.java b/src/main/java/org/apache/commons/math3/fitting/GaussianCurveFitter.java
new file mode 100644
index 0000000..685df28
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/GaussianCurveFitter.java
@@ -0,0 +1,425 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting;
+
+import org.apache.commons.math3.analysis.function.Gaussian;
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+import org.apache.commons.math3.exception.NullArgumentException;
+import org.apache.commons.math3.exception.NumberIsTooSmallException;
+import org.apache.commons.math3.exception.OutOfRangeException;
+import org.apache.commons.math3.exception.ZeroException;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+import org.apache.commons.math3.fitting.leastsquares.LeastSquaresBuilder;
+import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem;
+import org.apache.commons.math3.linear.DiagonalMatrix;
+import org.apache.commons.math3.util.FastMath;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+
+/**
+ * Fits points to a {@link org.apache.commons.math3.analysis.function.Gaussian.Parametric Gaussian}
+ * function. <br>
+ * The {@link #withStartPoint(double[]) initial guess values} must be passed in the following order:
+ *
+ * <ul>
+ * <li>Normalization
+ * <li>Mean
+ * <li>Sigma
+ * </ul>
+ *
+ * The optimal values will be returned in the same order.
+ *
+ * <p>Usage example:
+ *
+ * <pre>
+ * WeightedObservedPoints obs = new WeightedObservedPoints();
+ * obs.add(4.0254623, 531026.0);
+ * obs.add(4.03128248, 984167.0);
+ * obs.add(4.03839603, 1887233.0);
+ * obs.add(4.04421621, 2687152.0);
+ * obs.add(4.05132976, 3461228.0);
+ * obs.add(4.05326982, 3580526.0);
+ * obs.add(4.05779662, 3439750.0);
+ * obs.add(4.0636168, 2877648.0);
+ * obs.add(4.06943698, 2175960.0);
+ * obs.add(4.07525716, 1447024.0);
+ * obs.add(4.08237071, 717104.0);
+ * obs.add(4.08366408, 620014.0);
+ * double[] parameters = GaussianCurveFitter.create().fit(obs.toList());
+ * </pre>
+ *
+ * @since 3.3
+ */
+public class GaussianCurveFitter extends AbstractCurveFitter {
+ /** Parametric function to be fitted. */
+ private static final Gaussian.Parametric FUNCTION =
+ new Gaussian.Parametric() {
+ /** {@inheritDoc} */
+ @Override
+ public double value(double x, double... p) {
+ double v = Double.POSITIVE_INFINITY;
+ try {
+ v = super.value(x, p);
+ } catch (NotStrictlyPositiveException e) { // NOPMD
+ // Do nothing.
+ }
+ return v;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public double[] gradient(double x, double... p) {
+ double[] v = {
+ Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY
+ };
+ try {
+ v = super.gradient(x, p);
+ } catch (NotStrictlyPositiveException e) { // NOPMD
+ // Do nothing.
+ }
+ return v;
+ }
+ };
+
+ /** Initial guess. */
+ private final double[] initialGuess;
+
+ /** Maximum number of iterations of the optimization algorithm. */
+ private final int maxIter;
+
+ /**
+ * Constructor used by the factory methods.
+ *
+ * @param initialGuess Initial guess. If set to {@code null}, the initial guess will be
+ * estimated using the {@link ParameterGuesser}.
+ * @param maxIter Maximum number of iterations of the optimization algorithm.
+ */
+ private GaussianCurveFitter(double[] initialGuess, int maxIter) {
+ this.initialGuess = initialGuess;
+ this.maxIter = maxIter;
+ }
+
+ /**
+ * Creates a default curve fitter. The initial guess for the parameters will be computed
+ * automatically using a {@link ParameterGuesser}, and the maximum number of iterations of the
+ * optimization algorithm is set to {@link Integer#MAX_VALUE}.
+ *
+ * @return a curve fitter.
+ * @see #withStartPoint(double[])
+ * @see #withMaxIterations(int)
+ */
+ public static GaussianCurveFitter create() {
+ return new GaussianCurveFitter(null, Integer.MAX_VALUE);
+ }
+
+ /**
+ * Configure the start point (initial guess).
+ *
+ * @param newStart new start point (initial guess)
+ * @return a new instance.
+ */
+ public GaussianCurveFitter withStartPoint(double[] newStart) {
+ return new GaussianCurveFitter(newStart.clone(), maxIter);
+ }
+
+ /**
+ * Configure the maximum number of iterations.
+ *
+ * @param newMaxIter maximum number of iterations
+ * @return a new instance.
+ */
+ public GaussianCurveFitter withMaxIterations(int newMaxIter) {
+ return new GaussianCurveFitter(initialGuess, newMaxIter);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ protected LeastSquaresProblem getProblem(Collection<WeightedObservedPoint> observations) {
+
+ // Prepare least-squares problem.
+ final int len = observations.size();
+ final double[] target = new double[len];
+ final double[] weights = new double[len];
+
+ int i = 0;
+ for (WeightedObservedPoint obs : observations) {
+ target[i] = obs.getY();
+ weights[i] = obs.getWeight();
+ ++i;
+ }
+
+ final AbstractCurveFitter.TheoreticalValuesFunction model =
+ new AbstractCurveFitter.TheoreticalValuesFunction(FUNCTION, observations);
+
+ final double[] startPoint =
+ initialGuess != null
+ ? initialGuess
+ :
+ // Compute estimation.
+ new ParameterGuesser(observations).guess();
+
+ // Return a new least squares problem set up to fit a Gaussian curve to the
+ // observed points.
+ return new LeastSquaresBuilder()
+ .maxEvaluations(Integer.MAX_VALUE)
+ .maxIterations(maxIter)
+ .start(startPoint)
+ .target(target)
+ .weight(new DiagonalMatrix(weights))
+ .model(model.getModelFunction(), model.getModelFunctionJacobian())
+ .build();
+ }
+
+ /**
+ * Guesses the parameters {@code norm}, {@code mean}, and {@code sigma} of a {@link
+ * org.apache.commons.math3.analysis.function.Gaussian.Parametric} based on the specified
+ * observed points.
+ */
+ public static class ParameterGuesser {
+ /** Normalization factor. */
+ private final double norm;
+
+ /** Mean. */
+ private final double mean;
+
+ /** Standard deviation. */
+ private final double sigma;
+
+ /**
+ * Constructs instance with the specified observed points.
+ *
+ * @param observations Observed points from which to guess the parameters of the Gaussian.
+ * @throws NullArgumentException if {@code observations} is {@code null}.
+ * @throws NumberIsTooSmallException if there are fewer than 3 observations.
+ */
+ public ParameterGuesser(Collection<WeightedObservedPoint> observations) {
+ if (observations == null) {
+ throw new NullArgumentException(LocalizedFormats.INPUT_ARRAY);
+ }
+ if (observations.size() < 3) {
+ throw new NumberIsTooSmallException(observations.size(), 3, true);
+ }
+
+ final List<WeightedObservedPoint> sorted = sortObservations(observations);
+ final double[] params = basicGuess(sorted.toArray(new WeightedObservedPoint[0]));
+
+ norm = params[0];
+ mean = params[1];
+ sigma = params[2];
+ }
+
+ /**
+ * Gets an estimation of the parameters.
+ *
+ * @return the guessed parameters, in the following order:
+ * <ul>
+ * <li>Normalization factor
+ * <li>Mean
+ * <li>Standard deviation
+ * </ul>
+ */
+ public double[] guess() {
+ return new double[] {norm, mean, sigma};
+ }
+
+ /**
+ * Sort the observations.
+ *
+ * @param unsorted Input observations.
+ * @return the input observations, sorted.
+ */
+ private List<WeightedObservedPoint> sortObservations(
+ Collection<WeightedObservedPoint> unsorted) {
+ final List<WeightedObservedPoint> observations =
+ new ArrayList<WeightedObservedPoint>(unsorted);
+
+ final Comparator<WeightedObservedPoint> cmp =
+ new Comparator<WeightedObservedPoint>() {
+ /** {@inheritDoc} */
+ public int compare(WeightedObservedPoint p1, WeightedObservedPoint p2) {
+ if (p1 == null && p2 == null) {
+ return 0;
+ }
+ if (p1 == null) {
+ return -1;
+ }
+ if (p2 == null) {
+ return 1;
+ }
+ final int cmpX = Double.compare(p1.getX(), p2.getX());
+ if (cmpX < 0) {
+ return -1;
+ }
+ if (cmpX > 0) {
+ return 1;
+ }
+ final int cmpY = Double.compare(p1.getY(), p2.getY());
+ if (cmpY < 0) {
+ return -1;
+ }
+ if (cmpY > 0) {
+ return 1;
+ }
+ final int cmpW = Double.compare(p1.getWeight(), p2.getWeight());
+ if (cmpW < 0) {
+ return -1;
+ }
+ if (cmpW > 0) {
+ return 1;
+ }
+ return 0;
+ }
+ };
+
+ Collections.sort(observations, cmp);
+ return observations;
+ }
+
+ /**
+ * Guesses the parameters based on the specified observed points.
+ *
+ * @param points Observed points, sorted.
+ * @return the guessed parameters (normalization factor, mean and sigma).
+ */
+ private double[] basicGuess(WeightedObservedPoint[] points) {
+ final int maxYIdx = findMaxY(points);
+ final double n = points[maxYIdx].getY();
+ final double m = points[maxYIdx].getX();
+
+ double fwhmApprox;
+ try {
+ final double halfY = n + ((m - n) / 2);
+ final double fwhmX1 = interpolateXAtY(points, maxYIdx, -1, halfY);
+ final double fwhmX2 = interpolateXAtY(points, maxYIdx, 1, halfY);
+ fwhmApprox = fwhmX2 - fwhmX1;
+ } catch (OutOfRangeException e) {
+ // TODO: Exceptions should not be used for flow control.
+ fwhmApprox = points[points.length - 1].getX() - points[0].getX();
+ }
+ final double s = fwhmApprox / (2 * FastMath.sqrt(2 * FastMath.log(2)));
+
+ return new double[] {n, m, s};
+ }
+
+ /**
+ * Finds index of point in specified points with the largest Y.
+ *
+ * @param points Points to search.
+ * @return the index in specified points array.
+ */
+ private int findMaxY(WeightedObservedPoint[] points) {
+ int maxYIdx = 0;
+ for (int i = 1; i < points.length; i++) {
+ if (points[i].getY() > points[maxYIdx].getY()) {
+ maxYIdx = i;
+ }
+ }
+ return maxYIdx;
+ }
+
+ /**
+ * Interpolates using the specified points to determine X at the specified Y.
+ *
+ * @param points Points to use for interpolation.
+ * @param startIdx Index within points from which to start the search for interpolation
+ * bounds points.
+ * @param idxStep Index step for searching interpolation bounds points.
+ * @param y Y value for which X should be determined.
+ * @return the value of X for the specified Y.
+ * @throws ZeroException if {@code idxStep} is 0.
+ * @throws OutOfRangeException if specified {@code y} is not within the range of the
+ * specified {@code points}.
+ */
+ private double interpolateXAtY(
+ WeightedObservedPoint[] points, int startIdx, int idxStep, double y)
+ throws OutOfRangeException {
+ if (idxStep == 0) {
+ throw new ZeroException();
+ }
+ final WeightedObservedPoint[] twoPoints =
+ getInterpolationPointsForY(points, startIdx, idxStep, y);
+ final WeightedObservedPoint p1 = twoPoints[0];
+ final WeightedObservedPoint p2 = twoPoints[1];
+ if (p1.getY() == y) {
+ return p1.getX();
+ }
+ if (p2.getY() == y) {
+ return p2.getX();
+ }
+ return p1.getX()
+ + (((y - p1.getY()) * (p2.getX() - p1.getX())) / (p2.getY() - p1.getY()));
+ }
+
+ /**
+ * Gets the two bounding interpolation points from the specified points suitable for
+ * determining X at the specified Y.
+ *
+ * @param points Points to use for interpolation.
+ * @param startIdx Index within points from which to start search for interpolation bounds
+ * points.
+ * @param idxStep Index step for search for interpolation bounds points.
+ * @param y Y value for which X should be determined.
+ * @return the array containing two points suitable for determining X at the specified Y.
+ * @throws ZeroException if {@code idxStep} is 0.
+ * @throws OutOfRangeException if specified {@code y} is not within the range of the
+ * specified {@code points}.
+ */
+ private WeightedObservedPoint[] getInterpolationPointsForY(
+ WeightedObservedPoint[] points, int startIdx, int idxStep, double y)
+ throws OutOfRangeException {
+ if (idxStep == 0) {
+ throw new ZeroException();
+ }
+ for (int i = startIdx;
+ idxStep < 0 ? i + idxStep >= 0 : i + idxStep < points.length;
+ i += idxStep) {
+ final WeightedObservedPoint p1 = points[i];
+ final WeightedObservedPoint p2 = points[i + idxStep];
+ if (isBetween(y, p1.getY(), p2.getY())) {
+ if (idxStep < 0) {
+ return new WeightedObservedPoint[] {p2, p1};
+ } else {
+ return new WeightedObservedPoint[] {p1, p2};
+ }
+ }
+ }
+
+ // Boundaries are replaced by dummy values because the raised
+ // exception is caught and the message never displayed.
+ // TODO: Exceptions should not be used for flow control.
+ throw new OutOfRangeException(y, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY);
+ }
+
+ /**
+ * Determines whether a value is between two other values.
+ *
+ * @param value Value to test whether it is between {@code boundary1} and {@code boundary2}.
+ * @param boundary1 One end of the range.
+ * @param boundary2 Other end of the range.
+ * @return {@code true} if {@code value} is between {@code boundary1} and {@code boundary2}
+ * (inclusive), {@code false} otherwise.
+ */
+ private boolean isBetween(double value, double boundary1, double boundary2) {
+ return (value >= boundary1 && value <= boundary2)
+ || (value >= boundary2 && value <= boundary1);
+ }
+ }
+}
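
Because the fitter is immutable, withStartPoint and withMaxIterations each return a newly configured instance, so the calls can be chained. The sketch below reuses the data from the Javadoc example; the explicit start-point values are rough made-up estimates used only to illustrate bypassing the automatic ParameterGuesser.

// Sketch: explicit initial guess (norm, mean, sigma) and an iteration cap.
WeightedObservedPoints obs = new WeightedObservedPoints();
obs.add(4.0254623, 531026.0);
obs.add(4.05132976, 3461228.0);
obs.add(4.08237071, 717104.0);
// ... add the remaining points from the Javadoc example ...

GaussianCurveFitter fitter = GaussianCurveFitter.create()
        .withStartPoint(new double[] { 3.5e6, 4.05, 0.02 })
        .withMaxIterations(100);
double[] bestFit = fitter.fit(obs.toList()); // { norm, mean, sigma }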
diff --git a/src/main/java/org/apache/commons/math3/fitting/GaussianFitter.java b/src/main/java/org/apache/commons/math3/fitting/GaussianFitter.java
new file mode 100644
index 0000000..fe25c05
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/GaussianFitter.java
@@ -0,0 +1,362 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting;
+
+import org.apache.commons.math3.analysis.function.Gaussian;
+import org.apache.commons.math3.exception.NotStrictlyPositiveException;
+import org.apache.commons.math3.exception.NullArgumentException;
+import org.apache.commons.math3.exception.NumberIsTooSmallException;
+import org.apache.commons.math3.exception.OutOfRangeException;
+import org.apache.commons.math3.exception.ZeroException;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+import org.apache.commons.math3.optim.nonlinear.vector.MultivariateVectorOptimizer;
+import org.apache.commons.math3.util.FastMath;
+
+import java.util.Arrays;
+import java.util.Comparator;
+
+/**
+ * Fits points to a {@link org.apache.commons.math3.analysis.function.Gaussian.Parametric Gaussian}
+ * function.
+ *
+ * <p>Usage example:
+ *
+ * <pre>
+ * GaussianFitter fitter = new GaussianFitter(
+ * new LevenbergMarquardtOptimizer());
+ * fitter.addObservedPoint(4.0254623, 531026.0);
+ * fitter.addObservedPoint(4.03128248, 984167.0);
+ * fitter.addObservedPoint(4.03839603, 1887233.0);
+ * fitter.addObservedPoint(4.04421621, 2687152.0);
+ * fitter.addObservedPoint(4.05132976, 3461228.0);
+ * fitter.addObservedPoint(4.05326982, 3580526.0);
+ * fitter.addObservedPoint(4.05779662, 3439750.0);
+ * fitter.addObservedPoint(4.0636168, 2877648.0);
+ * fitter.addObservedPoint(4.06943698, 2175960.0);
+ * fitter.addObservedPoint(4.07525716, 1447024.0);
+ * fitter.addObservedPoint(4.08237071, 717104.0);
+ * fitter.addObservedPoint(4.08366408, 620014.0);
+ * double[] parameters = fitter.fit();
+ * </pre>
+ *
+ * @since 2.2
+ * @deprecated As of 3.3. Please use {@link GaussianCurveFitter} and {@link WeightedObservedPoints}
+ * instead.
+ */
+@Deprecated
+public class GaussianFitter extends CurveFitter<Gaussian.Parametric> {
+ /**
+ * Constructs an instance using the specified optimizer.
+ *
+ * @param optimizer Optimizer to use for the fitting.
+ */
+ public GaussianFitter(MultivariateVectorOptimizer optimizer) {
+ super(optimizer);
+ }
+
+ /**
+ * Fits a Gaussian function to the observed points.
+ *
+ * @param initialGuess First guess values in the following order:
+ * <ul>
+ * <li>Norm
+ * <li>Mean
+ * <li>Sigma
+ * </ul>
+ *
+ * @return the parameters of the Gaussian function that best fits the observed points (in the
+ * same order as above).
+ * @since 3.0
+ */
+ public double[] fit(double[] initialGuess) {
+ final Gaussian.Parametric f =
+ new Gaussian.Parametric() {
+ /** {@inheritDoc} */
+ @Override
+ public double value(double x, double... p) {
+ double v = Double.POSITIVE_INFINITY;
+ try {
+ v = super.value(x, p);
+ } catch (NotStrictlyPositiveException e) { // NOPMD
+ // Do nothing.
+ }
+ return v;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public double[] gradient(double x, double... p) {
+ double[] v = {
+ Double.POSITIVE_INFINITY,
+ Double.POSITIVE_INFINITY,
+ Double.POSITIVE_INFINITY
+ };
+ try {
+ v = super.gradient(x, p);
+ } catch (NotStrictlyPositiveException e) { // NOPMD
+ // Do nothing.
+ }
+ return v;
+ }
+ };
+
+ return fit(f, initialGuess);
+ }
+
+ /**
+ * Fits a Gaussian function to the observed points.
+ *
+ * @return the parameters of the Gaussian function that best fits the observed points (in the
+ * same order as above).
+ */
+ public double[] fit() {
+ final double[] guess = (new ParameterGuesser(getObservations())).guess();
+ return fit(guess);
+ }
+
+ /**
+ * Guesses the parameters {@code norm}, {@code mean}, and {@code sigma} of a {@link
+ * org.apache.commons.math3.analysis.function.Gaussian.Parametric} based on the specified
+ * observed points.
+ */
+ public static class ParameterGuesser {
+ /** Normalization factor. */
+ private final double norm;
+
+ /** Mean. */
+ private final double mean;
+
+ /** Standard deviation. */
+ private final double sigma;
+
+ /**
+ * Constructs instance with the specified observed points.
+ *
+ * @param observations Observed points from which to guess the parameters of the Gaussian.
+ * @throws NullArgumentException if {@code observations} is {@code null}.
+ * @throws NumberIsTooSmallException if there are fewer than 3 observations.
+ */
+ public ParameterGuesser(WeightedObservedPoint[] observations) {
+ if (observations == null) {
+ throw new NullArgumentException(LocalizedFormats.INPUT_ARRAY);
+ }
+ if (observations.length < 3) {
+ throw new NumberIsTooSmallException(observations.length, 3, true);
+ }
+
+ final WeightedObservedPoint[] sorted = sortObservations(observations);
+ final double[] params = basicGuess(sorted);
+
+ norm = params[0];
+ mean = params[1];
+ sigma = params[2];
+ }
+
+ /**
+ * Gets an estimation of the parameters.
+ *
+ * @return the guessed parameters, in the following order:
+ * <ul>
+ * <li>Normalization factor
+ * <li>Mean
+ * <li>Standard deviation
+ * </ul>
+ */
+ public double[] guess() {
+ return new double[] {norm, mean, sigma};
+ }
+
+ /**
+ * Sort the observations.
+ *
+ * @param unsorted Input observations.
+ * @return the input observations, sorted.
+ */
+ private WeightedObservedPoint[] sortObservations(WeightedObservedPoint[] unsorted) {
+ final WeightedObservedPoint[] observations = unsorted.clone();
+ final Comparator<WeightedObservedPoint> cmp =
+ new Comparator<WeightedObservedPoint>() {
+ /** {@inheritDoc} */
+ public int compare(WeightedObservedPoint p1, WeightedObservedPoint p2) {
+ if (p1 == null && p2 == null) {
+ return 0;
+ }
+ if (p1 == null) {
+ return -1;
+ }
+ if (p2 == null) {
+ return 1;
+ }
+ final int cmpX = Double.compare(p1.getX(), p2.getX());
+ if (cmpX < 0) {
+ return -1;
+ }
+ if (cmpX > 0) {
+ return 1;
+ }
+ final int cmpY = Double.compare(p1.getY(), p2.getY());
+ if (cmpY < 0) {
+ return -1;
+ }
+ if (cmpY > 0) {
+ return 1;
+ }
+ final int cmpW = Double.compare(p1.getWeight(), p2.getWeight());
+ if (cmpW < 0) {
+ return -1;
+ }
+ if (cmpW > 0) {
+ return 1;
+ }
+ return 0;
+ }
+ };
+
+ Arrays.sort(observations, cmp);
+ return observations;
+ }
+
+ /**
+ * Guesses the parameters based on the specified observed points.
+ *
+ * @param points Observed points, sorted.
+ * @return the guessed parameters (normalization factor, mean and sigma).
+ */
+ private double[] basicGuess(WeightedObservedPoint[] points) {
+ final int maxYIdx = findMaxY(points);
+ final double n = points[maxYIdx].getY();
+ final double m = points[maxYIdx].getX();
+
+ double fwhmApprox;
+ try {
+ final double halfY = n + ((m - n) / 2);
+ final double fwhmX1 = interpolateXAtY(points, maxYIdx, -1, halfY);
+ final double fwhmX2 = interpolateXAtY(points, maxYIdx, 1, halfY);
+ fwhmApprox = fwhmX2 - fwhmX1;
+ } catch (OutOfRangeException e) {
+ // TODO: Exceptions should not be used for flow control.
+ fwhmApprox = points[points.length - 1].getX() - points[0].getX();
+ }
+ final double s = fwhmApprox / (2 * FastMath.sqrt(2 * FastMath.log(2)));
+
+ return new double[] {n, m, s};
+ }
+
+ /**
+ * Finds index of point in specified points with the largest Y.
+ *
+ * @param points Points to search.
+ * @return the index in specified points array.
+ */
+ private int findMaxY(WeightedObservedPoint[] points) {
+ int maxYIdx = 0;
+ for (int i = 1; i < points.length; i++) {
+ if (points[i].getY() > points[maxYIdx].getY()) {
+ maxYIdx = i;
+ }
+ }
+ return maxYIdx;
+ }
+
+ /**
+ * Interpolates using the specified points to determine X at the specified Y.
+ *
+ * @param points Points to use for interpolation.
+ * @param startIdx Index within points from which to start the search for interpolation
+ * bounds points.
+ * @param idxStep Index step for searching interpolation bounds points.
+ * @param y Y value for which X should be determined.
+ * @return the value of X for the specified Y.
+ * @throws ZeroException if {@code idxStep} is 0.
+ * @throws OutOfRangeException if specified {@code y} is not within the range of the
+ * specified {@code points}.
+ */
+ private double interpolateXAtY(
+ WeightedObservedPoint[] points, int startIdx, int idxStep, double y)
+ throws OutOfRangeException {
+ if (idxStep == 0) {
+ throw new ZeroException();
+ }
+ final WeightedObservedPoint[] twoPoints =
+ getInterpolationPointsForY(points, startIdx, idxStep, y);
+ final WeightedObservedPoint p1 = twoPoints[0];
+ final WeightedObservedPoint p2 = twoPoints[1];
+ if (p1.getY() == y) {
+ return p1.getX();
+ }
+ if (p2.getY() == y) {
+ return p2.getX();
+ }
+ return p1.getX()
+ + (((y - p1.getY()) * (p2.getX() - p1.getX())) / (p2.getY() - p1.getY()));
+ }
+
+ /**
+ * Gets the two bounding interpolation points from the specified points suitable for
+ * determining X at the specified Y.
+ *
+ * @param points Points to use for interpolation.
+ * @param startIdx Index within points from which to start search for interpolation bounds
+ * points.
+ * @param idxStep Index step for search for interpolation bounds points.
+ * @param y Y value for which X should be determined.
+ * @return the array containing two points suitable for determining X at the specified Y.
+ * @throws ZeroException if {@code idxStep} is 0.
+ * @throws OutOfRangeException if specified {@code y} is not within the range of the
+ * specified {@code points}.
+ */
+ private WeightedObservedPoint[] getInterpolationPointsForY(
+ WeightedObservedPoint[] points, int startIdx, int idxStep, double y)
+ throws OutOfRangeException {
+ if (idxStep == 0) {
+ throw new ZeroException();
+ }
+ for (int i = startIdx;
+ idxStep < 0 ? i + idxStep >= 0 : i + idxStep < points.length;
+ i += idxStep) {
+ final WeightedObservedPoint p1 = points[i];
+ final WeightedObservedPoint p2 = points[i + idxStep];
+ if (isBetween(y, p1.getY(), p2.getY())) {
+ if (idxStep < 0) {
+ return new WeightedObservedPoint[] {p2, p1};
+ } else {
+ return new WeightedObservedPoint[] {p1, p2};
+ }
+ }
+ }
+
+ // Boundaries are replaced by dummy values because the raised
+ // exception is caught and the message never displayed.
+ // TODO: Exceptions should not be used for flow control.
+ throw new OutOfRangeException(y, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY);
+ }
+
+ /**
+ * Determines whether a value is between two other values.
+ *
+ * @param value Value to test whether it is between {@code boundary1} and {@code boundary2}.
+ * @param boundary1 One end of the range.
+ * @param boundary2 Other end of the range.
+ * @return {@code true} if {@code value} is between {@code boundary1} and {@code boundary2}
+ * (inclusive), {@code false} otherwise.
+ */
+ private boolean isBetween(double value, double boundary1, double boundary2) {
+ return (value >= boundary1 && value <= boundary2)
+ || (value >= boundary2 && value <= boundary1);
+ }
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/fitting/HarmonicCurveFitter.java b/src/main/java/org/apache/commons/math3/fitting/HarmonicCurveFitter.java
new file mode 100644
index 0000000..29a49c7
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/HarmonicCurveFitter.java
@@ -0,0 +1,410 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting;
+
+import org.apache.commons.math3.analysis.function.HarmonicOscillator;
+import org.apache.commons.math3.exception.MathIllegalStateException;
+import org.apache.commons.math3.exception.NumberIsTooSmallException;
+import org.apache.commons.math3.exception.ZeroException;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+import org.apache.commons.math3.fitting.leastsquares.LeastSquaresBuilder;
+import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem;
+import org.apache.commons.math3.linear.DiagonalMatrix;
+import org.apache.commons.math3.util.FastMath;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+/**
+ * Fits points to a {@link org.apache.commons.math3.analysis.function.HarmonicOscillator.Parametric
+ * harmonic oscillator} function. <br>
+ * The {@link #withStartPoint(double[]) initial guess values} must be passed in the following order:
+ *
+ * <ul>
+ * <li>Amplitude
+ * <li>Angular frequency
+ * <li>Phase
+ * </ul>
+ *
+ * The optimal values will be returned in the same order.
+ *
+ * @since 3.3
+ */
+public class HarmonicCurveFitter extends AbstractCurveFitter {
+ /** Parametric function to be fitted. */
+ private static final HarmonicOscillator.Parametric FUNCTION =
+ new HarmonicOscillator.Parametric();
+
+ /** Initial guess. */
+ private final double[] initialGuess;
+
+ /** Maximum number of iterations of the optimization algorithm. */
+ private final int maxIter;
+
+ /**
+ * Constructor used by the factory methods.
+ *
+ * @param initialGuess Initial guess. If set to {@code null}, the initial guess will be
+ * estimated using the {@link ParameterGuesser}.
+ * @param maxIter Maximum number of iterations of the optimization algorithm.
+ */
+ private HarmonicCurveFitter(double[] initialGuess, int maxIter) {
+ this.initialGuess = initialGuess;
+ this.maxIter = maxIter;
+ }
+
+ /**
+ * Creates a default curve fitter. The initial guess for the parameters will be computed
+ * automatically using a {@link ParameterGuesser}, and the maximum number of iterations of the
+ * optimization algorithm is set to {@link Integer#MAX_VALUE}.
+ *
+ * @return a curve fitter.
+ * @see #withStartPoint(double[])
+ * @see #withMaxIterations(int)
+ */
+ public static HarmonicCurveFitter create() {
+ return new HarmonicCurveFitter(null, Integer.MAX_VALUE);
+ }
+
+ /**
+ * Configure the start point (initial guess).
+ *
+ * @param newStart new start point (initial guess)
+ * @return a new instance.
+ */
+ public HarmonicCurveFitter withStartPoint(double[] newStart) {
+ return new HarmonicCurveFitter(newStart.clone(), maxIter);
+ }
+
+ /**
+ * Configure the maximum number of iterations.
+ *
+ * @param newMaxIter maximum number of iterations
+ * @return a new instance.
+ */
+ public HarmonicCurveFitter withMaxIterations(int newMaxIter) {
+ return new HarmonicCurveFitter(initialGuess, newMaxIter);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ protected LeastSquaresProblem getProblem(Collection<WeightedObservedPoint> observations) {
+ // Prepare least-squares problem.
+ final int len = observations.size();
+ final double[] target = new double[len];
+ final double[] weights = new double[len];
+
+ int i = 0;
+ for (WeightedObservedPoint obs : observations) {
+ target[i] = obs.getY();
+ weights[i] = obs.getWeight();
+ ++i;
+ }
+
+ final AbstractCurveFitter.TheoreticalValuesFunction model =
+ new AbstractCurveFitter.TheoreticalValuesFunction(FUNCTION, observations);
+
+ final double[] startPoint =
+ initialGuess != null
+ ? initialGuess
+ :
+ // Compute estimation.
+ new ParameterGuesser(observations).guess();
+
+ // Return a new least squares problem set up to fit a harmonic curve to the
+ // observed points.
+ return new LeastSquaresBuilder()
+ .maxEvaluations(Integer.MAX_VALUE)
+ .maxIterations(maxIter)
+ .start(startPoint)
+ .target(target)
+ .weight(new DiagonalMatrix(weights))
+ .model(model.getModelFunction(), model.getModelFunctionJacobian())
+ .build();
+ }
+
+ /**
+ * This class guesses harmonic coefficients from a sample.
+ *
+ * <p>The algorithm used to guess the coefficients is as follows:
+ *
+ * <p>We know \( f(t) \) at some sampling points \( t_i \) and want to find \( a \), \( \omega
+ * \) and \( \phi \) such that \( f(t) = a \cos (\omega t + \phi) \).
+ *
+ * <p>From the analytical expression, we can compute two primitives: \[ If2(t) = \int f^2 dt =
+ * a^2 (t + S(t)) / 2 \] \[ If'2(t) = \int f'^2 dt = a^2 \omega^2 (t - S(t)) / 2 \] where \(S(t)
+ * = \frac{\sin(2 (\omega t + \phi))}{2\omega}\)
+ *
+ * <p>We can remove \(S\) between these expressions: \[ If'2(t) = a^2 \omega^2 t - \omega^2
+ * If2(t) \]
+ *
+ * <p>The preceding expression shows that \(If'2 (t)\) is a linear combination of both \(t\) and
+ * \(If2(t)\): \[ If'2(t) = A t + B If2(t) \]
+ *
+ * <p>From the primitive, we can deduce the same form for definite integrals between \(t_1\) and
+ * \(t_i\) for each \(t_i\) : \[ If2(t_i) - If2(t_1) = A (t_i - t_1) + B (If2 (t_i) - If2(t_1))
+ * \]
+ *
+ * <p>We can find the coefficients \(A\) and \(B\) that best fit the sample to this linear
+ * expression by computing the definite integrals for each sample points.
+ *
+ * <p>For a bilinear expression \(z(x_i, y_i) = A x_i + B y_i\), the coefficients \(A\) and
+ * \(B\) that minimize a least-squares criterion \(\sum (z_i - z(x_i, y_i))^2\) are given by
+ * these expressions: \[ A = \frac{\sum y_i y_i \sum x_i z_i - \sum x_i y_i \sum y_i z_i} {\sum
+ * x_i x_i \sum y_i y_i - \sum x_i y_i \sum x_i y_i} \] \[ B = \frac{\sum x_i x_i \sum y_i z_i -
+ * \sum x_i y_i \sum x_i z_i} {\sum x_i x_i \sum y_i y_i - \sum x_i y_i \sum x_i y_i} \]
+ *
+ * <p>In fact, we can assume that both \(a\) and \(\omega\) are positive and compute them
+ * directly, knowing that \(A = a^2 \omega^2\) and that \(B = -\omega^2\). The complete
+ * algorithm is therefore: For each \(t_i\) from \(t_1\) to \(t_{n-1}\), compute: \[ f(t_i) \]
+ * \[ f'(t_i) = \frac{f (t_{i+1}) - f(t_{i-1})}{t_{i+1} - t_{i-1}} \] \[ x_i = t_i - t_1 \] \[
+ * y_i = \int_{t_1}^{t_i} f^2(t) dt \] \[ z_i = \int_{t_1}^{t_i} f'^2(t) dt \] and update the
+ * sums: \[ \sum x_i x_i, \sum y_i y_i, \sum x_i y_i, \sum x_i z_i, \sum y_i z_i \]
+ *
+ * <p>Then: \[ a = \sqrt{\frac{\sum y_i y_i \sum x_i z_i - \sum x_i y_i \sum y_i z_i } {\sum x_i
+ * y_i \sum x_i z_i - \sum x_i x_i \sum y_i z_i }} \] \[ \omega = \sqrt{\frac{\sum x_i y_i \sum
+ * x_i z_i - \sum x_i x_i \sum y_i z_i} {\sum x_i x_i \sum y_i y_i - \sum x_i y_i \sum x_i y_i}}
+ * \]
+ *
+ * <p>Once we know \(\omega\) we can compute: \[ fc = \omega f(t) \cos(\omega t) - f'(t)
+ * \sin(\omega t) \] \[ fs = \omega f(t) \sin(\omega t) + f'(t) \cos(\omega t) \]
+ *
+ * <p>It appears that \(fc = a \omega \cos(\phi)\) and \(fs = -a \omega \sin(\phi)\), so we can
+ * use these expressions to compute \(\phi\). The best estimate over the sample is given by
+ * averaging these expressions.
+ *
+ * <p>Since integrals and means are involved in the preceding estimations, these operations run
+ * in \(O(n)\) time, where \(n\) is the number of measurements.
+ */
+ public static class ParameterGuesser {
+ /** Amplitude. */
+ private final double a;
+
+ /** Angular frequency. */
+ private final double omega;
+
+ /** Phase. */
+ private final double phi;
+
+ /**
+ * Simple constructor.
+ *
+ * @param observations Sampled observations.
+ * @throws NumberIsTooSmallException if the sample is too short.
+ * @throws ZeroException if the abscissa range is zero.
+ * @throws MathIllegalStateException when the guessing procedure cannot produce sensible
+ * results.
+ */
+ public ParameterGuesser(Collection<WeightedObservedPoint> observations) {
+ if (observations.size() < 4) {
+ throw new NumberIsTooSmallException(
+ LocalizedFormats.INSUFFICIENT_OBSERVED_POINTS_IN_SAMPLE,
+ observations.size(),
+ 4,
+ true);
+ }
+
+ final WeightedObservedPoint[] sorted =
+ sortObservations(observations).toArray(new WeightedObservedPoint[0]);
+
+ final double aOmega[] = guessAOmega(sorted);
+ a = aOmega[0];
+ omega = aOmega[1];
+
+ phi = guessPhi(sorted);
+ }
+
+ /**
+ * Gets an estimation of the parameters.
+ *
+ * @return the guessed parameters, in the following order:
+ * <ul>
+ * <li>Amplitude
+ * <li>Angular frequency
+ * <li>Phase
+ * </ul>
+ */
+ public double[] guess() {
+ return new double[] {a, omega, phi};
+ }
+
+ /**
+ * Sort the observations with respect to the abscissa.
+ *
+ * @param unsorted Input observations.
+ * @return the input observations, sorted.
+ */
+ private List<WeightedObservedPoint> sortObservations(
+ Collection<WeightedObservedPoint> unsorted) {
+ final List<WeightedObservedPoint> observations =
+ new ArrayList<WeightedObservedPoint>(unsorted);
+
+ // Since the samples are almost always already sorted, this
+ // method is implemented as an insertion sort that reorders the
+ // elements in place. Insertion sort is very efficient in this case.
+ WeightedObservedPoint curr = observations.get(0);
+ final int len = observations.size();
+ for (int j = 1; j < len; j++) {
+ WeightedObservedPoint prec = curr;
+ curr = observations.get(j);
+ if (curr.getX() < prec.getX()) {
+ // the current element should be inserted closer to the beginning
+ int i = j - 1;
+ WeightedObservedPoint mI = observations.get(i);
+ while ((i >= 0) && (curr.getX() < mI.getX())) {
+ observations.set(i + 1, mI);
+ if (i-- != 0) {
+ mI = observations.get(i);
+ }
+ }
+ observations.set(i + 1, curr);
+ curr = observations.get(j);
+ }
+ }
+
+ return observations;
+ }
+
+ /**
+ * Estimate a first guess of the amplitude and angular frequency.
+ *
+ * @param observations Observations, sorted w.r.t. abscissa.
+ * @throws ZeroException if the abscissa range is zero.
+ * @throws MathIllegalStateException when the guessing procedure cannot produce sensible
+ * results.
+ * @return the guessed amplitude (at index 0) and circular frequency (at index 1).
+ */
+ private double[] guessAOmega(WeightedObservedPoint[] observations) {
+ final double[] aOmega = new double[2];
+
+ // initialize the sums for the linear model between the two integrals
+ double sx2 = 0;
+ double sy2 = 0;
+ double sxy = 0;
+ double sxz = 0;
+ double syz = 0;
+
+ double currentX = observations[0].getX();
+ double currentY = observations[0].getY();
+ double f2Integral = 0;
+ double fPrime2Integral = 0;
+ final double startX = currentX;
+ for (int i = 1; i < observations.length; ++i) {
+ // one step forward
+ final double previousX = currentX;
+ final double previousY = currentY;
+ currentX = observations[i].getX();
+ currentY = observations[i].getY();
+
+ // update the integrals of f<sup>2</sup> and f'<sup>2</sup>
+ // considering a linear model for f (and therefore constant f')
+ final double dx = currentX - previousX;
+ final double dy = currentY - previousY;
+ final double f2StepIntegral =
+ dx
+ * (previousY * previousY
+ + previousY * currentY
+ + currentY * currentY)
+ / 3;
+ final double fPrime2StepIntegral = dy * dy / dx;
+
+ final double x = currentX - startX;
+ f2Integral += f2StepIntegral;
+ fPrime2Integral += fPrime2StepIntegral;
+
+ sx2 += x * x;
+ sy2 += f2Integral * f2Integral;
+ sxy += x * f2Integral;
+ sxz += x * fPrime2Integral;
+ syz += f2Integral * fPrime2Integral;
+ }
+
+ // compute the amplitude and pulsation coefficients
+ double c1 = sy2 * sxz - sxy * syz;
+ double c2 = sxy * sxz - sx2 * syz;
+ double c3 = sx2 * sy2 - sxy * sxy;
+ if ((c1 / c2 < 0) || (c2 / c3 < 0)) {
+ final int last = observations.length - 1;
+ // Range of the observations, assuming that the
+ // observations are sorted.
+ final double xRange = observations[last].getX() - observations[0].getX();
+ if (xRange == 0) {
+ throw new ZeroException();
+ }
+ aOmega[1] = 2 * Math.PI / xRange;
+
+ double yMin = Double.POSITIVE_INFINITY;
+ double yMax = Double.NEGATIVE_INFINITY;
+ for (int i = 1; i < observations.length; ++i) {
+ final double y = observations[i].getY();
+ if (y < yMin) {
+ yMin = y;
+ }
+ if (y > yMax) {
+ yMax = y;
+ }
+ }
+ aOmega[0] = 0.5 * (yMax - yMin);
+ } else {
+ if (c2 == 0) {
+ // In some ill-conditioned cases (cf. MATH-844), the guesser
+ // procedure cannot produce sensible results.
+ throw new MathIllegalStateException(LocalizedFormats.ZERO_DENOMINATOR);
+ }
+
+ aOmega[0] = FastMath.sqrt(c1 / c2);
+ aOmega[1] = FastMath.sqrt(c2 / c3);
+ }
+
+ return aOmega;
+ }
+
+ /**
+ * Estimate a first guess of the phase.
+ *
+ * @param observations Observations, sorted w.r.t. abscissa.
+ * @return the guessed phase.
+ */
+ private double guessPhi(WeightedObservedPoint[] observations) {
+ // initialize the means
+ double fcMean = 0;
+ double fsMean = 0;
+
+ double currentX = observations[0].getX();
+ double currentY = observations[0].getY();
+ for (int i = 1; i < observations.length; ++i) {
+ // one step forward
+ final double previousX = currentX;
+ final double previousY = currentY;
+ currentX = observations[i].getX();
+ currentY = observations[i].getY();
+ final double currentYPrime = (currentY - previousY) / (currentX - previousX);
+
+ double omegaX = omega * currentX;
+ double cosine = FastMath.cos(omegaX);
+ double sine = FastMath.sin(omegaX);
+ fcMean += omega * currentY * cosine - currentYPrime * sine;
+ fsMean += omega * currentY * sine + currentYPrime * cosine;
+ }
+
+ return FastMath.atan2(-fsMean, fcMean);
+ }
+ }
+}
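
The guessing procedure described in ParameterGuesser can be exercised end to end on synthetic data. The sketch below uses made-up parameter values (a = 0.3, omega = 4.1, phi = 0.7) and the WeightedObservedPoints collector from this package; it is an illustration, not a test from this commit.

import org.apache.commons.math3.util.FastMath;

// Sketch: fit a * cos(omega * t + phi) to noise-free synthetic samples.
final double a = 0.3;
final double omega = 4.1;
final double phi = 0.7;

WeightedObservedPoints points = new WeightedObservedPoints();
for (int i = 0; i < 100; i++) {
    final double t = 0.1 * i;
    points.add(t, a * FastMath.cos(omega * t + phi));
}

double[] fitted = HarmonicCurveFitter.create().fit(points.toList());
// Expect fitted[0] ~ a, fitted[1] ~ omega, fitted[2] ~ phi (up to numerical error).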
diff --git a/src/main/java/org/apache/commons/math3/fitting/HarmonicFitter.java b/src/main/java/org/apache/commons/math3/fitting/HarmonicFitter.java
new file mode 100644
index 0000000..1a41398
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/HarmonicFitter.java
@@ -0,0 +1,386 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting;
+
+import org.apache.commons.math3.analysis.function.HarmonicOscillator;
+import org.apache.commons.math3.exception.MathIllegalStateException;
+import org.apache.commons.math3.exception.NumberIsTooSmallException;
+import org.apache.commons.math3.exception.ZeroException;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+import org.apache.commons.math3.optim.nonlinear.vector.MultivariateVectorOptimizer;
+import org.apache.commons.math3.util.FastMath;
+
+/**
+ * Class that implements a curve fitting specialized for sinusoids.
+ *
+ * <p>Harmonic fitting is a very simple case of curve fitting. The estimated coefficients are the
+ * amplitude a, the pulsation &omega; and the phase &phi;: <code>f (t) = a cos (&omega; t + &phi;)
+ * </code>. They are found by a least-squares estimator initialized with a rough guess based on
+ * integrals.
+ *
+ * @since 2.0
+ * @deprecated As of 3.3. Please use {@link HarmonicCurveFitter} and {@link WeightedObservedPoints}
+ * instead.
+ */
+@Deprecated
+public class HarmonicFitter extends CurveFitter<HarmonicOscillator.Parametric> {
+ /**
+ * Simple constructor.
+ *
+ * @param optimizer Optimizer to use for the fitting.
+ */
+ public HarmonicFitter(final MultivariateVectorOptimizer optimizer) {
+ super(optimizer);
+ }
+
+ /**
+ * Fit an harmonic function to the observed points.
+ *
+ * @param initialGuess First guess values in the following order:
+ * <ul>
+ * <li>Amplitude
+ * <li>Angular frequency
+ * <li>Phase
+ * </ul>
+ *
+ * @return the parameters of the harmonic function that best fits the observed points (in the
+ * same order as above).
+ */
+ public double[] fit(double[] initialGuess) {
+ return fit(new HarmonicOscillator.Parametric(), initialGuess);
+ }
+
+ /**
+ * Fit an harmonic function to the observed points. An initial guess will be automatically
+ * computed.
+ *
+ * @return the parameters of the harmonic function that best fits the observed points (see the
+ *     other {@link #fit(double[]) fit} method).
+ * @throws NumberIsTooSmallException if the sample is too short for the first guess to be
+ * computed.
+ * @throws ZeroException if the first guess cannot be computed because the abscissa range is
+ * zero.
+ */
+ public double[] fit() {
+ return fit((new ParameterGuesser(getObservations())).guess());
+ }
+
+ /**
+ * This class guesses harmonic coefficients from a sample.
+ *
+ * <p>The algorithm used to guess the coefficients is as follows:
+ *
+ * <p>We know f (t) at some sampling points t<sub>i</sub> and want to find a, &omega; and &phi;
+ * such that f (t) = a cos (&omega; t + &phi;).
+ *
+ * <p>From the analytical expression, we can compute two primitives :
+ *
+ * <pre>
+ * If2 (t) = &int; f<sup>2</sup> = a<sup>2</sup> &times; [t + S (t)] / 2
+ * If'2 (t) = &int; f'<sup>2</sup> = a<sup>2</sup> &omega;<sup>2</sup> &times; [t - S (t)] / 2
+ * where S (t) = sin (2 (&omega; t + &phi;)) / (2 &omega;)
+ * </pre>
+ *
+ * <p>We can remove S between these expressions :
+ *
+ * <pre>
+ * If'2 (t) = a<sup>2</sup> &omega;<sup>2</sup> t - &omega;<sup>2</sup> If2 (t)
+ * </pre>
+ *
+ * <p>The preceding expression shows that If'2 (t) is a linear combination of both t and If2
+ * (t): If'2 (t) = A &times; t + B &times; If2 (t)
+ *
+ * <p>From the primitive, we can deduce the same form for definite integrals between
+ * t<sub>1</sub> and t<sub>i</sub> for each t<sub>i</sub> :
+ *
+ * <pre>
+ * If'2 (t<sub>i</sub>) - If'2 (t<sub>1</sub>) = A &times; (t<sub>i</sub> - t<sub>1</sub>) + B &times; (If2 (t<sub>i</sub>) - If2 (t<sub>1</sub>))
+ * </pre>
+ *
+ * <p>We can find the coefficients A and B that best fit the sample to this linear expression by
+ * computing the definite integrals for each sample points.
+ *
+ * <p>For a bilinear expression z (x<sub>i</sub>, y<sub>i</sub>) = A &times; x<sub>i</sub> + B
+ * &times; y<sub>i</sub>, the coefficients A and B that minimize a least square criterion &sum;
+ * (z<sub>i</sub> - z (x<sub>i</sub>, y<sub>i</sub>))<sup>2</sup> are given by these
+ * expressions:
+ *
+ * <pre>
+ *
+ * &sum;y<sub>i</sub>y<sub>i</sub> &sum;x<sub>i</sub>z<sub>i</sub> - &sum;x<sub>i</sub>y<sub>i</sub> &sum;y<sub>i</sub>z<sub>i</sub>
+ * A = ------------------------
+ * &sum;x<sub>i</sub>x<sub>i</sub> &sum;y<sub>i</sub>y<sub>i</sub> - &sum;x<sub>i</sub>y<sub>i</sub> &sum;x<sub>i</sub>y<sub>i</sub>
+ *
+ * &sum;x<sub>i</sub>x<sub>i</sub> &sum;y<sub>i</sub>z<sub>i</sub> - &sum;x<sub>i</sub>y<sub>i</sub> &sum;x<sub>i</sub>z<sub>i</sub>
+ * B = ------------------------
+ * &sum;x<sub>i</sub>x<sub>i</sub> &sum;y<sub>i</sub>y<sub>i</sub> - &sum;x<sub>i</sub>y<sub>i</sub> &sum;x<sub>i</sub>y<sub>i</sub>
+ * </pre>
+ *
+ * <p>In fact, we can assume both a and &omega; are positive and compute them directly, knowing
+ * that A = a<sup>2</sup> &omega;<sup>2</sup> and that B = - &omega;<sup>2</sup>. The complete
+ * algorithm is therefore:
+ *
+ * <pre>
+ *
+ * for each t<sub>i</sub> from t<sub>1</sub> to t<sub>n-1</sub>, compute:
+ * f (t<sub>i</sub>)
+ * f' (t<sub>i</sub>) = (f (t<sub>i+1</sub>) - f(t<sub>i-1</sub>)) / (t<sub>i+1</sub> - t<sub>i-1</sub>)
+ * x<sub>i</sub> = t<sub>i</sub> - t<sub>1</sub>
+ * y<sub>i</sub> = &int; f<sup>2</sup> from t<sub>1</sub> to t<sub>i</sub>
+ * z<sub>i</sub> = &int; f'<sup>2</sup> from t<sub>1</sub> to t<sub>i</sub>
+ * update the sums &sum;x<sub>i</sub>x<sub>i</sub>, &sum;y<sub>i</sub>y<sub>i</sub>, &sum;x<sub>i</sub>y<sub>i</sub>, &sum;x<sub>i</sub>z<sub>i</sub> and &sum;y<sub>i</sub>z<sub>i</sub>
+ * end for
+ *
+ * |--------------------------
+ * \ | &sum;y<sub>i</sub>y<sub>i</sub> &sum;x<sub>i</sub>z<sub>i</sub> - &sum;x<sub>i</sub>y<sub>i</sub> &sum;y<sub>i</sub>z<sub>i</sub>
+ * a = \ | ------------------------
+ * \| &sum;x<sub>i</sub>y<sub>i</sub> &sum;x<sub>i</sub>z<sub>i</sub> - &sum;x<sub>i</sub>x<sub>i</sub> &sum;y<sub>i</sub>z<sub>i</sub>
+ *
+ *
+ * |--------------------------
+ * \ | &sum;x<sub>i</sub>y<sub>i</sub> &sum;x<sub>i</sub>z<sub>i</sub> - &sum;x<sub>i</sub>x<sub>i</sub> &sum;y<sub>i</sub>z<sub>i</sub>
+ * &omega; = \ | ------------------------
+ * \| &sum;x<sub>i</sub>x<sub>i</sub> &sum;y<sub>i</sub>y<sub>i</sub> - &sum;x<sub>i</sub>y<sub>i</sub> &sum;x<sub>i</sub>y<sub>i</sub>
+ *
+ * </pre>
+ *
+ * <p>Once we know &omega;, we can compute:
+ *
+ * <pre>
+ * fc = &omega; f (t) cos (&omega; t) - f' (t) sin (&omega; t)
+ * fs = &omega; f (t) sin (&omega; t) + f' (t) cos (&omega; t)
+ * </pre>
+ *
+ * <p>It appears that <code>fc = a &omega; cos (&phi;)</code> and <code>
+ * fs = -a &omega; sin (&phi;)</code>, so we can use these expressions to compute &phi;. The
+ * best estimate over the sample is given by averaging these expressions.
+ *
+ * <p>Since integrals and means are involved in the preceding estimations, these operations run
+ * in O(n) time, where n is the number of measurements.
+ */
+ public static class ParameterGuesser {
+ /** Amplitude. */
+ private final double a;
+
+ /** Angular frequency. */
+ private final double omega;
+
+ /** Phase. */
+ private final double phi;
+
+ /**
+ * Simple constructor.
+ *
+ * @param observations Sampled observations.
+ * @throws NumberIsTooSmallException if the sample is too short.
+ * @throws ZeroException if the abscissa range is zero.
+ * @throws MathIllegalStateException when the guessing procedure cannot produce sensible
+ * results.
+ */
+ public ParameterGuesser(WeightedObservedPoint[] observations) {
+ if (observations.length < 4) {
+ throw new NumberIsTooSmallException(
+ LocalizedFormats.INSUFFICIENT_OBSERVED_POINTS_IN_SAMPLE,
+ observations.length,
+ 4,
+ true);
+ }
+
+ final WeightedObservedPoint[] sorted = sortObservations(observations);
+
+ final double aOmega[] = guessAOmega(sorted);
+ a = aOmega[0];
+ omega = aOmega[1];
+
+ phi = guessPhi(sorted);
+ }
+
+ /**
+ * Gets an estimation of the parameters.
+ *
+ * @return the guessed parameters, in the following order:
+ * <ul>
+ * <li>Amplitude
+ * <li>Angular frequency
+ * <li>Phase
+ * </ul>
+ */
+ public double[] guess() {
+ return new double[] {a, omega, phi};
+ }
+
+ /**
+ * Sort the observations with respect to the abscissa.
+ *
+ * @param unsorted Input observations.
+ * @return the input observations, sorted.
+ */
+ private WeightedObservedPoint[] sortObservations(WeightedObservedPoint[] unsorted) {
+ final WeightedObservedPoint[] observations = unsorted.clone();
+
+ // Since the samples are almost always already sorted, this
+ // method is implemented as an insertion sort that reorders the
+ // elements in place. Insertion sort is very efficient in this case.
+ WeightedObservedPoint curr = observations[0];
+ for (int j = 1; j < observations.length; ++j) {
+ WeightedObservedPoint prec = curr;
+ curr = observations[j];
+ if (curr.getX() < prec.getX()) {
+ // the current element should be inserted closer to the beginning
+ int i = j - 1;
+ WeightedObservedPoint mI = observations[i];
+ while ((i >= 0) && (curr.getX() < mI.getX())) {
+ observations[i + 1] = mI;
+ if (i-- != 0) {
+ mI = observations[i];
+ }
+ }
+ observations[i + 1] = curr;
+ curr = observations[j];
+ }
+ }
+
+ return observations;
+ }
+
+ /**
+ * Estimate a first guess of the amplitude and angular frequency. This method assumes that
+ * the {@link #sortObservations(WeightedObservedPoint[])} method has been called previously.
+ *
+ * @param observations Observations, sorted w.r.t. abscissa.
+ * @throws ZeroException if the abscissa range is zero.
+ * @throws MathIllegalStateException when the guessing procedure cannot produce sensible
+ * results.
+ * @return the guessed amplitude (at index 0) and circular frequency (at index 1).
+ */
+ private double[] guessAOmega(WeightedObservedPoint[] observations) {
+ final double[] aOmega = new double[2];
+
+ // initialize the sums for the linear model between the two integrals
+ double sx2 = 0;
+ double sy2 = 0;
+ double sxy = 0;
+ double sxz = 0;
+ double syz = 0;
+
+ double currentX = observations[0].getX();
+ double currentY = observations[0].getY();
+ double f2Integral = 0;
+ double fPrime2Integral = 0;
+ final double startX = currentX;
+ for (int i = 1; i < observations.length; ++i) {
+ // one step forward
+ final double previousX = currentX;
+ final double previousY = currentY;
+ currentX = observations[i].getX();
+ currentY = observations[i].getY();
+
+ // update the integrals of f<sup>2</sup> and f'<sup>2</sup>
+ // considering a linear model for f (and therefore constant f')
+ final double dx = currentX - previousX;
+ final double dy = currentY - previousY;
+ final double f2StepIntegral =
+ dx
+ * (previousY * previousY
+ + previousY * currentY
+ + currentY * currentY)
+ / 3;
+ final double fPrime2StepIntegral = dy * dy / dx;
+
+ final double x = currentX - startX;
+ f2Integral += f2StepIntegral;
+ fPrime2Integral += fPrime2StepIntegral;
+
+ sx2 += x * x;
+ sy2 += f2Integral * f2Integral;
+ sxy += x * f2Integral;
+ sxz += x * fPrime2Integral;
+ syz += f2Integral * fPrime2Integral;
+ }
+
+ // compute the amplitude and pulsation coefficients
+ double c1 = sy2 * sxz - sxy * syz;
+ double c2 = sxy * sxz - sx2 * syz;
+ double c3 = sx2 * sy2 - sxy * sxy;
+ if ((c1 / c2 < 0) || (c2 / c3 < 0)) {
+ final int last = observations.length - 1;
+ // Range of the observations, assuming that the
+ // observations are sorted.
+ final double xRange = observations[last].getX() - observations[0].getX();
+ if (xRange == 0) {
+ throw new ZeroException();
+ }
+ aOmega[1] = 2 * Math.PI / xRange;
+
+ double yMin = Double.POSITIVE_INFINITY;
+ double yMax = Double.NEGATIVE_INFINITY;
+ for (int i = 1; i < observations.length; ++i) {
+ final double y = observations[i].getY();
+ if (y < yMin) {
+ yMin = y;
+ }
+ if (y > yMax) {
+ yMax = y;
+ }
+ }
+ aOmega[0] = 0.5 * (yMax - yMin);
+ } else {
+ if (c2 == 0) {
+ // In some ill-conditioned cases (cf. MATH-844), the guesser
+ // procedure cannot produce sensible results.
+ throw new MathIllegalStateException(LocalizedFormats.ZERO_DENOMINATOR);
+ }
+
+ aOmega[0] = FastMath.sqrt(c1 / c2);
+ aOmega[1] = FastMath.sqrt(c2 / c3);
+ }
+
+ return aOmega;
+ }
+
+ /**
+ * Estimate a first guess of the phase.
+ *
+ * @param observations Observations, sorted w.r.t. abscissa.
+ * @return the guessed phase.
+ */
+ private double guessPhi(WeightedObservedPoint[] observations) {
+ // initialize the means
+ double fcMean = 0;
+ double fsMean = 0;
+
+ double currentX = observations[0].getX();
+ double currentY = observations[0].getY();
+ for (int i = 1; i < observations.length; ++i) {
+ // one step forward
+ final double previousX = currentX;
+ final double previousY = currentY;
+ currentX = observations[i].getX();
+ currentY = observations[i].getY();
+ final double currentYPrime = (currentY - previousY) / (currentX - previousX);
+
+ double omegaX = omega * currentX;
+ double cosine = FastMath.cos(omegaX);
+ double sine = FastMath.sin(omegaX);
+ fcMean += omega * currentY * cosine - currentYPrime * sine;
+ fsMean += omega * currentY * sine + currentYPrime * cosine;
+ }
+
+ return FastMath.atan2(-fsMean, fcMean);
+ }
+ }
+}
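
A small sketch of the integral-based guesser described in the Javadoc above (illustrative samples, unit weights); only classes declared in this diff are used:

import org.apache.commons.math3.fitting.HarmonicFitter;
import org.apache.commons.math3.fitting.WeightedObservedPoint;

public class HarmonicGuesserSketch {
    public static void main(String[] args) {
        // Illustrative samples of f(t) = 2.3 * cos(1.7 * t + 0.4), unit weights.
        WeightedObservedPoint[] points = new WeightedObservedPoint[100];
        for (int i = 0; i < points.length; i++) {
            double t = 0.1 * i;
            points[i] = new WeightedObservedPoint(1.0, t, 2.3 * Math.cos(1.7 * t + 0.4));
        }
        // Rough estimates { amplitude, angular frequency, phase } from the guesser.
        double[] guess = new HarmonicFitter.ParameterGuesser(points).guess();
        System.out.printf("a~%.3f omega~%.3f phi~%.3f%n", guess[0], guess[1], guess[2]);
    }
}
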
diff --git a/src/main/java/org/apache/commons/math3/fitting/PolynomialCurveFitter.java b/src/main/java/org/apache/commons/math3/fitting/PolynomialCurveFitter.java
new file mode 100644
index 0000000..ab2b5ca
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/PolynomialCurveFitter.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting;
+
+import org.apache.commons.math3.analysis.polynomials.PolynomialFunction;
+import org.apache.commons.math3.exception.MathInternalError;
+import org.apache.commons.math3.fitting.leastsquares.LeastSquaresBuilder;
+import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem;
+import org.apache.commons.math3.linear.DiagonalMatrix;
+
+import java.util.Collection;
+
+/**
+ * Fits points to a {@link
+ * org.apache.commons.math3.analysis.polynomials.PolynomialFunction.Parametric polynomial} function.
+ * <br>
+ * The size of the {@link #withStartPoint(double[]) initial guess} array defines the degree of the
+ * polynomial to be fitted. The coefficients must be sorted in increasing order of the polynomial's degree. The
+ * optimal values of the coefficients will be returned in the same order.
+ *
+ * @since 3.3
+ */
+public class PolynomialCurveFitter extends AbstractCurveFitter {
+ /** Parametric function to be fitted. */
+ private static final PolynomialFunction.Parametric FUNCTION =
+ new PolynomialFunction.Parametric();
+
+ /** Initial guess. */
+ private final double[] initialGuess;
+
+ /** Maximum number of iterations of the optimization algorithm. */
+ private final int maxIter;
+
+ /**
+ * Constructor used by the factory methods.
+ *
+ * @param initialGuess Initial guess.
+ * @param maxIter Maximum number of iterations of the optimization algorithm.
+ * @throws MathInternalError if {@code initialGuess} is {@code null}.
+ */
+ private PolynomialCurveFitter(double[] initialGuess, int maxIter) {
+ this.initialGuess = initialGuess;
+ this.maxIter = maxIter;
+ }
+
+ /**
+ * Creates a default curve fitter. Zero will be used as initial guess for the coefficients, and
+ * the maximum number of iterations of the optimization algorithm is set to {@link
+ * Integer#MAX_VALUE}.
+ *
+ * @param degree Degree of the polynomial to be fitted.
+ * @return a curve fitter.
+ * @see #withStartPoint(double[])
+ * @see #withMaxIterations(int)
+ */
+ public static PolynomialCurveFitter create(int degree) {
+ return new PolynomialCurveFitter(new double[degree + 1], Integer.MAX_VALUE);
+ }
+
+ /**
+ * Configure the start point (initial guess).
+ *
+ * @param newStart new start point (initial guess)
+ * @return a new instance.
+ */
+ public PolynomialCurveFitter withStartPoint(double[] newStart) {
+ return new PolynomialCurveFitter(newStart.clone(), maxIter);
+ }
+
+ /**
+ * Configure the maximum number of iterations.
+ *
+ * @param newMaxIter maximum number of iterations
+ * @return a new instance.
+ */
+ public PolynomialCurveFitter withMaxIterations(int newMaxIter) {
+ return new PolynomialCurveFitter(initialGuess, newMaxIter);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ protected LeastSquaresProblem getProblem(Collection<WeightedObservedPoint> observations) {
+ // Prepare least-squares problem.
+ final int len = observations.size();
+ final double[] target = new double[len];
+ final double[] weights = new double[len];
+
+ int i = 0;
+ for (WeightedObservedPoint obs : observations) {
+ target[i] = obs.getY();
+ weights[i] = obs.getWeight();
+ ++i;
+ }
+
+ final AbstractCurveFitter.TheoreticalValuesFunction model =
+ new AbstractCurveFitter.TheoreticalValuesFunction(FUNCTION, observations);
+
+ if (initialGuess == null) {
+ throw new MathInternalError();
+ }
+
+ // Return a new least squares problem set up to fit a polynomial curve to the
+ // observed points.
+ return new LeastSquaresBuilder()
+ .maxEvaluations(Integer.MAX_VALUE)
+ .maxIterations(maxIter)
+ .start(initialGuess)
+ .target(target)
+ .weight(new DiagonalMatrix(weights))
+ .model(model.getModelFunction(), model.getModelFunctionJacobian())
+ .build();
+ }
+}
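
A minimal usage sketch for the polynomial fitter above (illustrative data; assumes fit(Collection) is inherited from AbstractCurveFitter, as in the released library):

import java.util.List;
import org.apache.commons.math3.fitting.PolynomialCurveFitter;
import org.apache.commons.math3.fitting.WeightedObservedPoint;
import org.apache.commons.math3.fitting.WeightedObservedPoints;

public class PolynomialFitSketch {
    public static void main(String[] args) {
        WeightedObservedPoints obs = new WeightedObservedPoints();
        // Illustrative data roughly following 1 - x + 2x^2.
        obs.add(-1.0, 4.1);
        obs.add(0.0, 0.9);
        obs.add(1.0, 2.1);
        obs.add(2.0, 7.0);
        obs.add(3.0, 16.1);
        List<WeightedObservedPoint> points = obs.toList();
        // Degree 2: the factory allocates a 3-element start point of zeros.
        double[] coeff = PolynomialCurveFitter.create(2).fit(points);
        // Coefficients in increasing order: coeff[0] + coeff[1]*x + coeff[2]*x^2.
        System.out.println(java.util.Arrays.toString(coeff));
    }
}
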
diff --git a/src/main/java/org/apache/commons/math3/fitting/PolynomialFitter.java b/src/main/java/org/apache/commons/math3/fitting/PolynomialFitter.java
new file mode 100644
index 0000000..0dd17a4
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/PolynomialFitter.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting;
+
+import org.apache.commons.math3.analysis.polynomials.PolynomialFunction;
+import org.apache.commons.math3.optim.nonlinear.vector.MultivariateVectorOptimizer;
+
+/**
+ * Polynomial fitting is a very simple case of {@link CurveFitter curve fitting}. The estimated
+ * coefficients are the polynomial coefficients (see the {@link #fit(double[]) fit} method).
+ *
+ * @since 2.0
+ * @deprecated As of 3.3. Please use {@link PolynomialCurveFitter} and {@link
+ * WeightedObservedPoints} instead.
+ */
+@Deprecated
+public class PolynomialFitter extends CurveFitter<PolynomialFunction.Parametric> {
+ /**
+ * Simple constructor.
+ *
+ * @param optimizer Optimizer to use for the fitting.
+ */
+ public PolynomialFitter(MultivariateVectorOptimizer optimizer) {
+ super(optimizer);
+ }
+
+ /**
+ * Get the coefficients of the polynomial fitting the weighted data points. The degree of the
+ * fitting polynomial is {@code guess.length - 1}.
+ *
+ * @param guess First guess for the coefficients. They must be sorted in increasing order of the
+ * polynomial's degree.
+ * @param maxEval Maximum number of evaluations of the polynomial.
+ * @return the coefficients of the polynomial that best fits the observed points.
+ * @throws org.apache.commons.math3.exception.TooManyEvaluationsException if the number of
+ * evaluations exceeds {@code maxEval}.
+ * @throws org.apache.commons.math3.exception.ConvergenceException if the algorithm failed to
+ * converge.
+ */
+ public double[] fit(int maxEval, double[] guess) {
+ return fit(maxEval, new PolynomialFunction.Parametric(), guess);
+ }
+
+ /**
+ * Get the coefficients of the polynomial fitting the weighted data points. The degree of the
+ * fitting polynomial is {@code guess.length - 1}.
+ *
+ * @param guess First guess for the coefficients. They must be sorted in increasing order of the
+ * polynomial's degree.
+ * @return the coefficients of the polynomial that best fits the observed points.
+ * @throws org.apache.commons.math3.exception.ConvergenceException if the algorithm failed to
+ * converge.
+ */
+ public double[] fit(double[] guess) {
+ return fit(new PolynomialFunction.Parametric(), guess);
+ }
+}
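
Per the deprecation note above, a call on the old fitter can typically be migrated as sketched below; "optimizer", "guess" and "obs" stand for the caller's own optimizer, coefficient start point and WeightedObservedPoints container:

// Deprecated style (observations registered on the fitter, optimizer supplied by caller):
//     double[] best = new PolynomialFitter(optimizer).fit(guess);
// Replacement style suggested by the deprecation note:
//     double[] best = PolynomialCurveFitter.create(guess.length - 1)
//                                          .withStartPoint(guess)
//                                          .fit(obs.toList());
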
diff --git a/src/main/java/org/apache/commons/math3/fitting/SimpleCurveFitter.java b/src/main/java/org/apache/commons/math3/fitting/SimpleCurveFitter.java
new file mode 100644
index 0000000..304f661
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/SimpleCurveFitter.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting;
+
+import org.apache.commons.math3.analysis.ParametricUnivariateFunction;
+import org.apache.commons.math3.fitting.leastsquares.LeastSquaresBuilder;
+import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem;
+import org.apache.commons.math3.linear.DiagonalMatrix;
+
+import java.util.Collection;
+
+/**
+ * Fits points to a user-defined {@link ParametricUnivariateFunction function}.
+ *
+ * @since 3.4
+ */
+public class SimpleCurveFitter extends AbstractCurveFitter {
+ /** Function to fit. */
+ private final ParametricUnivariateFunction function;
+
+ /** Initial guess for the parameters. */
+ private final double[] initialGuess;
+
+ /** Maximum number of iterations of the optimization algorithm. */
+ private final int maxIter;
+
+ /**
+ * Constructor used by the factory methods.
+ *
+ * @param function Function to fit.
+ * @param initialGuess Initial guess. Cannot be {@code null}. Its length must be consistent with
+ * the number of parameters of the {@code function} to fit.
+ * @param maxIter Maximum number of iterations of the optimization algorithm.
+ */
+ private SimpleCurveFitter(
+ ParametricUnivariateFunction function, double[] initialGuess, int maxIter) {
+ this.function = function;
+ this.initialGuess = initialGuess;
+ this.maxIter = maxIter;
+ }
+
+ /**
+ * Creates a curve fitter. The maximum number of iterations of the optimization algorithm is set
+ * to {@link Integer#MAX_VALUE}.
+ *
+ * @param f Function to fit.
+ * @param start Initial guess for the parameters. Cannot be {@code null}. Its length must be
+ * consistent with the number of parameters of the function to fit.
+ * @return a curve fitter.
+ * @see #withStartPoint(double[])
+ * @see #withMaxIterations(int)
+ */
+ public static SimpleCurveFitter create(ParametricUnivariateFunction f, double[] start) {
+ return new SimpleCurveFitter(f, start, Integer.MAX_VALUE);
+ }
+
+ /**
+ * Configure the start point (initial guess).
+ *
+ * @param newStart new start point (initial guess)
+ * @return a new instance.
+ */
+ public SimpleCurveFitter withStartPoint(double[] newStart) {
+ return new SimpleCurveFitter(function, newStart.clone(), maxIter);
+ }
+
+ /**
+ * Configure the maximum number of iterations.
+ *
+ * @param newMaxIter maximum number of iterations
+ * @return a new instance.
+ */
+ public SimpleCurveFitter withMaxIterations(int newMaxIter) {
+ return new SimpleCurveFitter(function, initialGuess, newMaxIter);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ protected LeastSquaresProblem getProblem(Collection<WeightedObservedPoint> observations) {
+ // Prepare least-squares problem.
+ final int len = observations.size();
+ final double[] target = new double[len];
+ final double[] weights = new double[len];
+
+ int count = 0;
+ for (WeightedObservedPoint obs : observations) {
+ target[count] = obs.getY();
+ weights[count] = obs.getWeight();
+ ++count;
+ }
+
+ final AbstractCurveFitter.TheoreticalValuesFunction model =
+ new AbstractCurveFitter.TheoreticalValuesFunction(function, observations);
+
+ // Create an optimizer for fitting the curve to the observed points.
+ return new LeastSquaresBuilder()
+ .maxEvaluations(Integer.MAX_VALUE)
+ .maxIterations(maxIter)
+ .start(initialGuess)
+ .target(target)
+ .weight(new DiagonalMatrix(weights))
+ .model(model.getModelFunction(), model.getModelFunctionJacobian())
+ .build();
+ }
+}
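
A usage sketch with a user-defined parametric function, here an exponential model (illustrative data; assumes fit(Collection) is inherited from AbstractCurveFitter):

import org.apache.commons.math3.analysis.ParametricUnivariateFunction;
import org.apache.commons.math3.fitting.SimpleCurveFitter;
import org.apache.commons.math3.fitting.WeightedObservedPoints;

public class SimpleCurveFitSketch {
    public static void main(String[] args) {
        // Model y = p0 * exp(p1 * x); gradient entries are d/dp0 and d/dp1.
        ParametricUnivariateFunction expModel = new ParametricUnivariateFunction() {
            public double value(double x, double... p) {
                return p[0] * Math.exp(p[1] * x);
            }
            public double[] gradient(double x, double... p) {
                double e = Math.exp(p[1] * x);
                return new double[] { e, p[0] * x * e };
            }
        };

        WeightedObservedPoints obs = new WeightedObservedPoints();
        // Illustrative data roughly following 1.0 * exp(0.9 * x).
        obs.add(0.0, 1.0);
        obs.add(1.0, 2.5);
        obs.add(2.0, 6.0);
        obs.add(3.0, 15.0);

        double[] best = SimpleCurveFitter.create(expModel, new double[] { 1, 1 })
                                         .fit(obs.toList());
        System.out.printf("p0=%.3f p1=%.3f%n", best[0], best[1]);
    }
}
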
diff --git a/src/main/java/org/apache/commons/math3/fitting/WeightedObservedPoint.java b/src/main/java/org/apache/commons/math3/fitting/WeightedObservedPoint.java
new file mode 100644
index 0000000..ec88747
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/WeightedObservedPoint.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting;
+
+import java.io.Serializable;
+
+/**
+ * This class is a simple container for a weighted observed point used in {@link CurveFitter curve
+ * fitting}.
+ *
+ * <p>Instances of this class are guaranteed to be immutable.
+ *
+ * @since 2.0
+ */
+public class WeightedObservedPoint implements Serializable {
+ /** Serializable version id. */
+ private static final long serialVersionUID = 5306874947404636157L;
+
+ /** Weight of the measurement in the fitting process. */
+ private final double weight;
+
+ /** Abscissa of the point. */
+ private final double x;
+
+ /** Observed value of the function at x. */
+ private final double y;
+
+ /**
+ * Simple constructor.
+ *
+ * @param weight Weight of the measurement in the fitting process.
+ * @param x Abscissa of the measurement.
+ * @param y Ordinate of the measurement.
+ */
+ public WeightedObservedPoint(final double weight, final double x, final double y) {
+ this.weight = weight;
+ this.x = x;
+ this.y = y;
+ }
+
+ /**
+ * Gets the weight of the measurement in the fitting process.
+ *
+ * @return the weight of the measurement in the fitting process.
+ */
+ public double getWeight() {
+ return weight;
+ }
+
+ /**
+ * Gets the abscissa of the point.
+ *
+ * @return the abscissa of the point.
+ */
+ public double getX() {
+ return x;
+ }
+
+ /**
+ * Gets the observed value of the function at x.
+ *
+ * @return the observed value of the function at x.
+ */
+ public double getY() {
+ return y;
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/fitting/WeightedObservedPoints.java b/src/main/java/org/apache/commons/math3/fitting/WeightedObservedPoints.java
new file mode 100644
index 0000000..69deaae
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/WeightedObservedPoints.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Simple container for weighted observed points used in {@link AbstractCurveFitter curve fitting}
+ * algorithms.
+ *
+ * @since 3.3
+ */
+public class WeightedObservedPoints implements Serializable {
+ /** Serializable version id. */
+ private static final long serialVersionUID = 20130813L;
+
+ /** Observed points. */
+ private final List<WeightedObservedPoint> observations = new ArrayList<WeightedObservedPoint>();
+
+ /**
+ * Adds a point to the sample. Calling this method is equivalent to calling {@code add(1.0, x,
+ * y)}.
+ *
+ * @param x Abscissa of the point.
+ * @param y Observed value at {@code x}. After fitting we should have {@code f(x)} as close as
+ * possible to this value.
+ * @see #add(double, double, double)
+ * @see #add(WeightedObservedPoint)
+ * @see #toList()
+ */
+ public void add(double x, double y) {
+ add(1d, x, y);
+ }
+
+ /**
+ * Adds a point to the sample.
+ *
+ * @param weight Weight of the observed point.
+ * @param x Abscissa of the point.
+ * @param y Observed value at {@code x}. After fitting we should have {@code f(x)} as close as
+ * possible to this value.
+ * @see #add(double, double)
+ * @see #add(WeightedObservedPoint)
+ * @see #toList()
+ */
+ public void add(double weight, double x, double y) {
+ observations.add(new WeightedObservedPoint(weight, x, y));
+ }
+
+ /**
+ * Adds a point to the sample.
+ *
+ * @param observed Observed point to add.
+ * @see #add(double, double)
+ * @see #add(double, double, double)
+ * @see #toList()
+ */
+ public void add(WeightedObservedPoint observed) {
+ observations.add(observed);
+ }
+
+ /**
+ * Gets a <em>snapshot</em> of the observed points. The list of stored points is copied in order
+ * to ensure that modification of the returned instance does not affect this container.
+ * Conversely, further modification of this container (through the {@code add} or {@code clear}
+ * methods) will not affect the returned list.
+ *
+ * @return the observed points, in the order they were added to this container.
+ * @see #add(double, double)
+ * @see #add(double, double, double)
+ * @see #add(WeightedObservedPoint)
+ */
+ public List<WeightedObservedPoint> toList() {
+ // The copy is necessary to ensure thread-safety because of the
+ // "clear" method (which otherwise would be able to empty the
+ // list of points while it is being used by another thread).
+ return new ArrayList<WeightedObservedPoint>(observations);
+ }
+
+ /** Removes all observations from this container. */
+ public void clear() {
+ observations.clear();
+ }
+}
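
A short sketch of the snapshot semantics described for toList() above (illustrative values):

import java.util.List;
import org.apache.commons.math3.fitting.WeightedObservedPoint;
import org.apache.commons.math3.fitting.WeightedObservedPoints;

public class SnapshotSketch {
    public static void main(String[] args) {
        WeightedObservedPoints obs = new WeightedObservedPoints();
        obs.add(1.0, 2.0);          // unit weight, x = 1.0, y = 2.0
        obs.add(0.5, 2.0, 3.0);     // explicit weight 0.5, x = 2.0, y = 3.0

        List<WeightedObservedPoint> snapshot = obs.toList();
        obs.clear();

        // The snapshot is an independent copy: still 2 points after clear().
        System.out.println(snapshot.size());
    }
}
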
diff --git a/src/main/java/org/apache/commons/math3/fitting/leastsquares/AbstractEvaluation.java b/src/main/java/org/apache/commons/math3/fitting/leastsquares/AbstractEvaluation.java
new file mode 100644
index 0000000..b164380
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/leastsquares/AbstractEvaluation.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting.leastsquares;
+
+import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem.Evaluation;
+import org.apache.commons.math3.linear.ArrayRealVector;
+import org.apache.commons.math3.linear.DecompositionSolver;
+import org.apache.commons.math3.linear.QRDecomposition;
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.linear.RealVector;
+import org.apache.commons.math3.util.FastMath;
+
+/**
+ * An implementation of {@link Evaluation} that is designed for extension. All of the
+ * methods implemented here use the methods that are left unimplemented.
+ * <p/>
+ * TODO cache results?
+ *
+ * @since 3.3
+ */
+public abstract class AbstractEvaluation implements Evaluation {
+
+ /** number of observations */
+ private final int observationSize;
+
+ /**
+ * Constructor.
+ *
+ * @param observationSize the number of observations. Needed for {@link
+ * #getRMS()}.
+ */
+ AbstractEvaluation(final int observationSize) {
+ this.observationSize = observationSize;
+ }
+
+ /** {@inheritDoc} */
+ public RealMatrix getCovariances(double threshold) {
+ // Set up the Jacobian.
+ final RealMatrix j = this.getJacobian();
+
+ // Compute transpose(J)J.
+ final RealMatrix jTj = j.transpose().multiply(j);
+
+ // Compute the covariances matrix.
+ final DecompositionSolver solver
+ = new QRDecomposition(jTj, threshold).getSolver();
+ return solver.getInverse();
+ }
+
+ /** {@inheritDoc} */
+ public RealVector getSigma(double covarianceSingularityThreshold) {
+ final RealMatrix cov = this.getCovariances(covarianceSingularityThreshold);
+ final int nC = cov.getColumnDimension();
+ final RealVector sig = new ArrayRealVector(nC);
+ for (int i = 0; i < nC; ++i) {
+ sig.setEntry(i, FastMath.sqrt(cov.getEntry(i,i)));
+ }
+ return sig;
+ }
+
+ /** {@inheritDoc} */
+ public double getRMS() {
+ final double cost = this.getCost();
+ return FastMath.sqrt(cost * cost / this.observationSize);
+ }
+
+ /** {@inheritDoc} */
+ public double getCost() {
+ final ArrayRealVector r = new ArrayRealVector(this.getResiduals());
+ return FastMath.sqrt(r.dotProduct(r));
+ }
+
+}
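
To make the getCost()/getRMS() formulas above concrete, a stand-alone illustration of the same arithmetic on an arbitrary residual vector:

public class CostRmsSketch {
    public static void main(String[] args) {
        // Illustrative residuals for 4 observations.
        double[] r = { 1.0, -2.0, 0.5, 1.5 };
        double dot = 0;
        for (double ri : r) {
            dot += ri * ri;
        }
        double cost = Math.sqrt(dot);                    // same formula as getCost()
        double rms = Math.sqrt(cost * cost / r.length);  // same formula as getRMS()
        System.out.printf("cost=%.4f rms=%.4f%n", cost, rms);
    }
}
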
diff --git a/src/main/java/org/apache/commons/math3/fitting/leastsquares/DenseWeightedEvaluation.java b/src/main/java/org/apache/commons/math3/fitting/leastsquares/DenseWeightedEvaluation.java
new file mode 100644
index 0000000..89f5f1f
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/leastsquares/DenseWeightedEvaluation.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting.leastsquares;
+
+import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem.Evaluation;
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.linear.RealVector;
+
+/**
+ * Applies a dense weight matrix to an evaluation.
+ *
+ * @since 3.3
+ */
+class DenseWeightedEvaluation extends AbstractEvaluation {
+
+ /** the unweighted evaluation */
+ private final Evaluation unweighted;
+ /** reference to the weight square root matrix */
+ private final RealMatrix weightSqrt;
+
+ /**
+ * Create a weighted evaluation from an unweighted one.
+ *
+ * @param unweighted the evaluation before weights are applied
+ * @param weightSqrt the matrix square root of the weight matrix
+ */
+ DenseWeightedEvaluation(final Evaluation unweighted,
+ final RealMatrix weightSqrt) {
+ // weight square root is square, nR=nC=number of observations
+ super(weightSqrt.getColumnDimension());
+ this.unweighted = unweighted;
+ this.weightSqrt = weightSqrt;
+ }
+
+ /* apply weights */
+
+ /** {@inheritDoc} */
+ public RealMatrix getJacobian() {
+ return weightSqrt.multiply(this.unweighted.getJacobian());
+ }
+
+ /** {@inheritDoc} */
+ public RealVector getResiduals() {
+ return this.weightSqrt.operate(this.unweighted.getResiduals());
+ }
+
+ /* delegate */
+
+ /** {@inheritDoc} */
+ public RealVector getPoint() {
+ return unweighted.getPoint();
+ }
+
+}
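
For intuition on the weighting above: multiplying the residuals by the matrix square root of the weight matrix turns the plain sum of squares into the weighted sum of squares. A small sketch with a diagonal weight matrix (illustrative values):

import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.DiagonalMatrix;
import org.apache.commons.math3.linear.RealVector;

public class WeightSqrtSketch {
    public static void main(String[] args) {
        // Diagonal weights and residuals (illustrative values).
        double[] w = { 4.0, 1.0, 0.25 };
        RealVector r = new ArrayRealVector(new double[] { 1.0, -2.0, 2.0 });

        // The matrix square root of a diagonal weight matrix is the element-wise sqrt.
        double[] sqrtW = new double[w.length];
        for (int i = 0; i < w.length; i++) {
            sqrtW[i] = Math.sqrt(w[i]);
        }
        RealVector weightedResiduals = new DiagonalMatrix(sqrtW).operate(r);

        // ||sqrt(W) r||^2 equals the weighted sum of squares sum(w_i * r_i^2): prints 9.0.
        System.out.println(weightedResiduals.dotProduct(weightedResiduals));
    }
}
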
diff --git a/src/main/java/org/apache/commons/math3/fitting/leastsquares/EvaluationRmsChecker.java b/src/main/java/org/apache/commons/math3/fitting/leastsquares/EvaluationRmsChecker.java
new file mode 100644
index 0000000..ceb5988
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/leastsquares/EvaluationRmsChecker.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting.leastsquares;
+
+import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem.Evaluation;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.util.Precision;
+
+/**
+ * Check if an optimization has converged based on the change in computed RMS.
+ *
+ * @since 3.4
+ */
+public class EvaluationRmsChecker implements ConvergenceChecker<Evaluation> {
+
+ /** relative tolerance for comparisons. */
+ private final double relTol;
+ /** absolute tolerance for comparisons. */
+ private final double absTol;
+
+ /**
+ * Create a convergence checker for the RMS with the same relative and absolute
+ * tolerance.
+ *
+ * <p>Convenience constructor for when the relative and absolute tolerances are the
+ * same. Same as {@code new EvaluationRmsChecker(tol, tol)}.
+ *
+ * @param tol the relative and absolute tolerance.
+ * @see #EvaluationRmsChecker(double, double)
+ */
+ public EvaluationRmsChecker(final double tol) {
+ this(tol, tol);
+ }
+
+ /**
+ * Create a convergence checker for the RMS with a relative and absolute tolerance.
+ *
+ * <p>The optimization has converged when the RMS values of consecutive evaluations are equal
+ * to within the given relative tolerance or absolute tolerance.
+ *
+ * @param relTol the relative tolerance.
+ * @param absTol the absolute tolerance.
+ * @see Precision#equals(double, double, double)
+ * @see Precision#equalsWithRelativeTolerance(double, double, double)
+ */
+ public EvaluationRmsChecker(final double relTol, final double absTol) {
+ this.relTol = relTol;
+ this.absTol = absTol;
+ }
+
+ /** {@inheritDoc} */
+ public boolean converged(final int iteration,
+ final Evaluation previous,
+ final Evaluation current) {
+ final double prevRms = previous.getRMS();
+ final double currRms = current.getRMS();
+ return Precision.equals(prevRms, currRms, this.absTol) ||
+ Precision.equalsWithRelativeTolerance(prevRms, currRms, this.relTol);
+ }
+
+}
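
A usage sketch wiring the RMS checker into a least-squares fit of a straight line (illustrative data; assumes LeastSquaresBuilder also provides a start(double[]) setter and that Optimum exposes getPoint(), as in the released library):

import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
import org.apache.commons.math3.analysis.MultivariateVectorFunction;
import org.apache.commons.math3.fitting.leastsquares.EvaluationRmsChecker;
import org.apache.commons.math3.fitting.leastsquares.GaussNewtonOptimizer;
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresBuilder;
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresOptimizer;
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem;

public class RmsCheckerSketch {
    // Abscissae of the (illustrative) observations.
    private static final double[] X = { 0, 1, 2 };

    public static void main(String[] args) {
        // Model: y_i = p0 + p1 * x_i.
        MultivariateVectorFunction value = new MultivariateVectorFunction() {
            public double[] value(double[] p) {
                double[] y = new double[X.length];
                for (int i = 0; i < X.length; i++) {
                    y[i] = p[0] + p[1] * X[i];
                }
                return y;
            }
        };
        // Jacobian: d y_i / d p0 = 1, d y_i / d p1 = x_i.
        MultivariateMatrixFunction jacobian = new MultivariateMatrixFunction() {
            public double[][] value(double[] p) {
                double[][] j = new double[X.length][2];
                for (int i = 0; i < X.length; i++) {
                    j[i][0] = 1;
                    j[i][1] = X[i];
                }
                return j;
            }
        };

        LeastSquaresProblem problem = new LeastSquaresBuilder()
                .model(value, jacobian)
                .target(new double[] { 1.1, 2.9, 5.2 })   // observed y values
                .start(new double[] { 0, 0 })             // initial guess
                .checker(new EvaluationRmsChecker(1e-10)) // stop when the RMS stabilizes
                .maxEvaluations(1000)
                .maxIterations(1000)
                .build();

        LeastSquaresOptimizer.Optimum optimum = new GaussNewtonOptimizer().optimize(problem);
        System.out.println(optimum.getPoint());
    }
}
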
diff --git a/src/main/java/org/apache/commons/math3/fitting/leastsquares/GaussNewtonOptimizer.java b/src/main/java/org/apache/commons/math3/fitting/leastsquares/GaussNewtonOptimizer.java
new file mode 100644
index 0000000..8157706
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/leastsquares/GaussNewtonOptimizer.java
@@ -0,0 +1,299 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting.leastsquares;
+
+import org.apache.commons.math3.exception.ConvergenceException;
+import org.apache.commons.math3.exception.NullArgumentException;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem.Evaluation;
+import org.apache.commons.math3.linear.ArrayRealVector;
+import org.apache.commons.math3.linear.CholeskyDecomposition;
+import org.apache.commons.math3.linear.LUDecomposition;
+import org.apache.commons.math3.linear.MatrixUtils;
+import org.apache.commons.math3.linear.NonPositiveDefiniteMatrixException;
+import org.apache.commons.math3.linear.QRDecomposition;
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.linear.RealVector;
+import org.apache.commons.math3.linear.SingularMatrixException;
+import org.apache.commons.math3.linear.SingularValueDecomposition;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.util.Incrementor;
+import org.apache.commons.math3.util.Pair;
+
+/**
+ * Gauss-Newton least-squares solver.
+ * <p> This class solves a least-squares problem by
+ * solving the normal equations of the linearized problem at each iteration. Either LU
+ * decomposition or Cholesky decomposition can be used to solve the normal equations,
+ * or QR decomposition or SVD decomposition can be used to solve the linear system. LU
+ * decomposition is faster but QR decomposition is more robust for difficult problems,
+ * and SVD can compute a solution for rank-deficient problems.
+ * </p>
+ *
+ * @since 3.3
+ */
+public class GaussNewtonOptimizer implements LeastSquaresOptimizer {
+
+ /** The decomposition algorithm to use to solve the normal equations. */
+ //TODO move to linear package and expand options?
+ public enum Decomposition {
+ /**
+ * Solve by forming the normal equations (J<sup>T</sup>Jx=J<sup>T</sup>r) and
+ * using the {@link LUDecomposition}.
+ *
+ * <p> Theoretically this method takes mn<sup>2</sup>/2 operations to compute the
+ * normal matrix and n<sup>3</sup>/3 operations (m > n) to solve the system using
+ * the LU decomposition. </p>
+ */
+ LU {
+ @Override
+ protected RealVector solve(final RealMatrix jacobian,
+ final RealVector residuals) {
+ try {
+ final Pair<RealMatrix, RealVector> normalEquation =
+ computeNormalMatrix(jacobian, residuals);
+ final RealMatrix normal = normalEquation.getFirst();
+ final RealVector jTr = normalEquation.getSecond();
+ return new LUDecomposition(normal, SINGULARITY_THRESHOLD)
+ .getSolver()
+ .solve(jTr);
+ } catch (SingularMatrixException e) {
+ throw new ConvergenceException(LocalizedFormats.UNABLE_TO_SOLVE_SINGULAR_PROBLEM, e);
+ }
+ }
+ },
+ /**
+ * Solve the linear least squares problem (Jx=r) using the {@link
+ * QRDecomposition}.
+ *
+ * <p> Theoretically this method takes mn<sup>2</sup> - n<sup>3</sup>/3 operations
+ * (m > n) and has better numerical accuracy than any method that forms the normal
+ * equations. </p>
+ */
+ QR {
+ @Override
+ protected RealVector solve(final RealMatrix jacobian,
+ final RealVector residuals) {
+ try {
+ return new QRDecomposition(jacobian, SINGULARITY_THRESHOLD)
+ .getSolver()
+ .solve(residuals);
+ } catch (SingularMatrixException e) {
+ throw new ConvergenceException(LocalizedFormats.UNABLE_TO_SOLVE_SINGULAR_PROBLEM, e);
+ }
+ }
+ },
+ /**
+ * Solve by forming the normal equations (J<sup>T</sup>Jx=J<sup>T</sup>r) and
+ * using the {@link CholeskyDecomposition}.
+ *
+ * <p> Theoretically this method takes mn<sup>2</sup>/2 operations to compute the
+ * normal matrix and n<sup>3</sup>/6 operations (m > n) to solve the system using
+ * the Cholesky decomposition. </p>
+ */
+ CHOLESKY {
+ @Override
+ protected RealVector solve(final RealMatrix jacobian,
+ final RealVector residuals) {
+ try {
+ final Pair<RealMatrix, RealVector> normalEquation =
+ computeNormalMatrix(jacobian, residuals);
+ final RealMatrix normal = normalEquation.getFirst();
+ final RealVector jTr = normalEquation.getSecond();
+ return new CholeskyDecomposition(
+ normal, SINGULARITY_THRESHOLD, SINGULARITY_THRESHOLD)
+ .getSolver()
+ .solve(jTr);
+ } catch (NonPositiveDefiniteMatrixException e) {
+ throw new ConvergenceException(LocalizedFormats.UNABLE_TO_SOLVE_SINGULAR_PROBLEM, e);
+ }
+ }
+ },
+ /**
+ * Solve the linear least squares problem using the {@link
+ * SingularValueDecomposition}.
+ *
+ * <p> This method is slower, but can provide a solution for rank deficient and
+ * nearly singular systems.
+ */
+ SVD {
+ @Override
+ protected RealVector solve(final RealMatrix jacobian,
+ final RealVector residuals) {
+ return new SingularValueDecomposition(jacobian)
+ .getSolver()
+ .solve(residuals);
+ }
+ };
+
+ /**
+ * Solve the linear least squares problem Jx=r.
+ *
+ * @param jacobian the Jacobian matrix, J. The number of rows must be >= the number of
+ * columns.
+ * @param residuals the computed residuals, r.
+ * @return the solution x, to the linear least squares problem Jx=r.
+ * @throws ConvergenceException if the matrix properties (e.g. singular) do not
+ * permit a solution.
+ */
+ protected abstract RealVector solve(RealMatrix jacobian,
+ RealVector residuals);
+ }
+
+ /**
+ * The singularity threshold for matrix decompositions. Determines when a {@link
+ * ConvergenceException} is thrown. The current value was the default value for {@link
+ * LUDecomposition}.
+ */
+ private static final double SINGULARITY_THRESHOLD = 1e-11;
+
+ /** The decomposition algorithm used to solve the linearized least squares problem. */
+ private final Decomposition decomposition;
+
+ /**
+ * Creates a Gauss Newton optimizer.
+ * <p/>
+ * The default is to use QR decomposition, which solves the linearized least
+ * squares problem directly rather than forming the normal equations.
+ */
+ public GaussNewtonOptimizer() {
+ this(Decomposition.QR);
+ }
+
+ /**
+ * Create a Gauss Newton optimizer that uses the given decomposition algorithm to
+ * solve the normal equations.
+ *
+ * @param decomposition the {@link Decomposition} algorithm.
+ */
+ public GaussNewtonOptimizer(final Decomposition decomposition) {
+ this.decomposition = decomposition;
+ }
+
+ /**
+ * Get the matrix decomposition algorithm used to solve the normal equations.
+ *
+ * @return the matrix {@link Decomposition} algorithm.
+ */
+ public Decomposition getDecomposition() {
+ return this.decomposition;
+ }
+
+ /**
+ * Configure the decomposition algorithm.
+ *
+ * @param newDecomposition the {@link Decomposition} algorithm to use.
+ * @return a new instance.
+ */
+ public GaussNewtonOptimizer withDecomposition(final Decomposition newDecomposition) {
+ return new GaussNewtonOptimizer(newDecomposition);
+ }
+
+ /** {@inheritDoc} */
+ public Optimum optimize(final LeastSquaresProblem lsp) {
+ //create local evaluation and iteration counts
+ final Incrementor evaluationCounter = lsp.getEvaluationCounter();
+ final Incrementor iterationCounter = lsp.getIterationCounter();
+ final ConvergenceChecker<Evaluation> checker
+ = lsp.getConvergenceChecker();
+
+ // Computation will be useless without a checker (see "for-loop").
+ if (checker == null) {
+ throw new NullArgumentException();
+ }
+
+ RealVector currentPoint = lsp.getStart();
+
+ // iterate until convergence is reached
+ Evaluation current = null;
+ while (true) {
+ iterationCounter.incrementCount();
+
+ // evaluate the objective function and its jacobian
+ Evaluation previous = current;
+ // Value of the objective function at "currentPoint".
+ evaluationCounter.incrementCount();
+ current = lsp.evaluate(currentPoint);
+ final RealVector currentResiduals = current.getResiduals();
+ final RealMatrix weightedJacobian = current.getJacobian();
+ currentPoint = current.getPoint();
+
+ // Check convergence.
+ if (previous != null &&
+ checker.converged(iterationCounter.getCount(), previous, current)) {
+ return new OptimumImpl(current,
+ evaluationCounter.getCount(),
+ iterationCounter.getCount());
+ }
+
+ // solve the linearized least squares problem
+ final RealVector dX = this.decomposition.solve(weightedJacobian, currentResiduals);
+ // update the estimated parameters
+ currentPoint = currentPoint.add(dX);
+ }
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public String toString() {
+ return "GaussNewtonOptimizer{" +
+ "decomposition=" + decomposition +
+ '}';
+ }
+
+ /**
+ * Compute the normal matrix, J<sup>T</sup>J.
+ *
+ * @param jacobian the m by n jacobian matrix, J. Input.
+ * @param residuals the m by 1 residual vector, r. Input.
+ * @return the n by n normal matrix and the n by 1 J<sup>T</sup>r vector.
+ */
+ private static Pair<RealMatrix, RealVector> computeNormalMatrix(final RealMatrix jacobian,
+ final RealVector residuals) {
+ //since the normal matrix is symmetric, we only need to compute half of it.
+ final int nR = jacobian.getRowDimension();
+ final int nC = jacobian.getColumnDimension();
+ //allocate space for return values
+ final RealMatrix normal = MatrixUtils.createRealMatrix(nC, nC);
+ final RealVector jTr = new ArrayRealVector(nC);
+ //for each measurement
+ for (int i = 0; i < nR; ++i) {
+ //compute JTr for measurement i
+ for (int j = 0; j < nC; j++) {
+ jTr.setEntry(j, jTr.getEntry(j) +
+ residuals.getEntry(i) * jacobian.getEntry(i, j));
+ }
+
+ // add the contribution to the normal matrix for measurement i
+ for (int k = 0; k < nC; ++k) {
+ //only compute the upper triangular part
+ for (int l = k; l < nC; ++l) {
+ normal.setEntry(k, l, normal.getEntry(k, l) +
+ jacobian.getEntry(i, k) * jacobian.getEntry(i, l));
+ }
+ }
+ }
+ //copy the upper triangular part to the lower triangular part.
+ for (int i = 0; i < nC; i++) {
+ for (int j = 0; j < i; j++) {
+ normal.setEntry(i, j, normal.getEntry(j, i));
+ }
+ }
+ return new Pair<RealMatrix, RealVector>(normal, jTr);
+ }
+
+}
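
A brief sketch of selecting a decomposition other than the default; "problem" stands for any prepared LeastSquaresProblem, e.g. one assembled with LeastSquaresBuilder as in the earlier sketch:

import org.apache.commons.math3.fitting.leastsquares.GaussNewtonOptimizer;
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresOptimizer;
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem;

public class GaussNewtonSketch {
    /** Solves a prepared problem with SVD, which tolerates rank-deficient Jacobians. */
    static LeastSquaresOptimizer.Optimum solveRobustly(LeastSquaresProblem problem) {
        GaussNewtonOptimizer optimizer =
                new GaussNewtonOptimizer().withDecomposition(GaussNewtonOptimizer.Decomposition.SVD);
        return optimizer.optimize(problem);
    }
}
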
diff --git a/src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresAdapter.java b/src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresAdapter.java
new file mode 100644
index 0000000..1c09874
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresAdapter.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting.leastsquares;
+
+import org.apache.commons.math3.linear.RealVector;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.util.Incrementor;
+
+/**
+ * An adapter that delegates to another implementation of {@link LeastSquaresProblem}.
+ *
+ * @since 3.3
+ */
+public class LeastSquaresAdapter implements LeastSquaresProblem {
+
+ /** the delegate problem */
+ private final LeastSquaresProblem problem;
+
+ /**
+ * Delegate the {@link LeastSquaresProblem} interface to the given implementation.
+ *
+ * @param problem the delegate
+ */
+ public LeastSquaresAdapter(final LeastSquaresProblem problem) {
+ this.problem = problem;
+ }
+
+ /** {@inheritDoc} */
+ public RealVector getStart() {
+ return problem.getStart();
+ }
+
+ /** {@inheritDoc} */
+ public int getObservationSize() {
+ return problem.getObservationSize();
+ }
+
+ /** {@inheritDoc} */
+ public int getParameterSize() {
+ return problem.getParameterSize();
+ }
+
+ /** {@inheritDoc}
+ * @param point*/
+ public Evaluation evaluate(final RealVector point) {
+ return problem.evaluate(point);
+ }
+
+ /** {@inheritDoc} */
+ public Incrementor getEvaluationCounter() {
+ return problem.getEvaluationCounter();
+ }
+
+ /** {@inheritDoc} */
+ public Incrementor getIterationCounter() {
+ return problem.getIterationCounter();
+ }
+
+ /** {@inheritDoc} */
+ public ConvergenceChecker<Evaluation> getConvergenceChecker() {
+ return problem.getConvergenceChecker();
+ }
+}
diff --git a/src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresBuilder.java b/src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresBuilder.java
new file mode 100644
index 0000000..7b14b37
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresBuilder.java
@@ -0,0 +1,226 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting.leastsquares;
+
+import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem.Evaluation;
+import org.apache.commons.math3.linear.ArrayRealVector;
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.linear.RealVector;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.optim.PointVectorValuePair;
+
+/**
+ * A mutable builder for {@link LeastSquaresProblem}s.
+ *
+ * @see LeastSquaresFactory
+ * @since 3.3
+ */
+public class LeastSquaresBuilder {
+
+ /** max evaluations */
+ private int maxEvaluations;
+ /** max iterations */
+ private int maxIterations;
+ /** convergence checker */
+ private ConvergenceChecker<Evaluation> checker;
+ /** model function */
+ private MultivariateJacobianFunction model;
+ /** observed values */
+ private RealVector target;
+ /** initial guess */
+ private RealVector start;
+ /** weight matrix */
+ private RealMatrix weight;
+ /**
+ * Lazy evaluation.
+ *
+ * @since 3.4
+ */
+ private boolean lazyEvaluation;
+ /** Validator.
+ *
+ * @since 3.4
+ */
+ private ParameterValidator paramValidator;
+
+
+ /**
+ * Construct a {@link LeastSquaresProblem} from the data in this builder.
+ *
+ * @return a new {@link LeastSquaresProblem}.
+ */
+ public LeastSquaresProblem build() {
+ return LeastSquaresFactory.create(model,
+ target,
+ start,
+ weight,
+ checker,
+ maxEvaluations,
+ maxIterations,
+ lazyEvaluation,
+ paramValidator);
+ }
+
+ /**
+ * Configure the max evaluations.
+ *
+ * @param newMaxEvaluations the maximum number of evaluations permitted.
+ * @return this
+ */
+ public LeastSquaresBuilder maxEvaluations(final int newMaxEvaluations) {
+ this.maxEvaluations = newMaxEvaluations;
+ return this;
+ }
+
+ /**
+ * Configure the max iterations.
+ *
+ * @param newMaxIterations the maximum number of iterations permitted.
+ * @return this
+ */
+ public LeastSquaresBuilder maxIterations(final int newMaxIterations) {
+ this.maxIterations = newMaxIterations;
+ return this;
+ }
+
+ /**
+ * Configure the convergence checker.
+ *
+ * @param newChecker the convergence checker.
+ * @return this
+ */
+ public LeastSquaresBuilder checker(final ConvergenceChecker<Evaluation> newChecker) {
+ this.checker = newChecker;
+ return this;
+ }
+
+ /**
+ * Configure the convergence checker.
+ * <p>
+ * This method is a convenience variant of {@link #checker(ConvergenceChecker)} for
+ * checkers expressed in terms of {@link PointVectorValuePair}.
+ *
+ * @param newChecker the convergence checker.
+ * @return this
+ */
+ public LeastSquaresBuilder checkerPair(final ConvergenceChecker<PointVectorValuePair> newChecker) {
+ return this.checker(LeastSquaresFactory.evaluationChecker(newChecker));
+ }
+
+ /**
+ * Configure the model function.
+ *
+ * @param value the model function value
+ * @param jacobian the Jacobian of {@code value}
+ * @return this
+ */
+ public LeastSquaresBuilder model(final MultivariateVectorFunction value,
+ final MultivariateMatrixFunction jacobian) {
+ return model(LeastSquaresFactory.model(value, jacobian));
+ }
+
+ /**
+ * Configure the model function.
+ *
+ * @param newModel the model function value and Jacobian
+ * @return this
+ */
+ public LeastSquaresBuilder model(final MultivariateJacobianFunction newModel) {
+ this.model = newModel;
+ return this;
+ }
+
+ /**
+ * Configure the observed data.
+ *
+ * @param newTarget the observed data.
+ * @return this
+ */
+ public LeastSquaresBuilder target(final RealVector newTarget) {
+ this.target = newTarget;
+ return this;
+ }
+
+ /**
+ * Configure the observed data.
+ *
+ * @param newTarget the observed data.
+ * @return this
+ */
+ public LeastSquaresBuilder target(final double[] newTarget) {
+ return target(new ArrayRealVector(newTarget, false));
+ }
+
+ /**
+ * Configure the initial guess.
+ *
+ * @param newStart the initial guess.
+ * @return this
+ */
+ public LeastSquaresBuilder start(final RealVector newStart) {
+ this.start = newStart;
+ return this;
+ }
+
+ /**
+ * Configure the initial guess.
+ *
+ * @param newStart the initial guess.
+ * @return this
+ */
+ public LeastSquaresBuilder start(final double[] newStart) {
+ return start(new ArrayRealVector(newStart, false));
+ }
+
+ /**
+ * Configure the weight matrix.
+ *
+ * @param newWeight the weight matrix
+ * @return this
+ */
+ public LeastSquaresBuilder weight(final RealMatrix newWeight) {
+ this.weight = newWeight;
+ return this;
+ }
+
+ /**
+ * Configure whether evaluation will be lazy or not.
+ *
+ * @param newValue Whether to perform lazy evaluation.
+ * @return this object.
+ *
+ * @since 3.4
+ */
+ public LeastSquaresBuilder lazyEvaluation(final boolean newValue) {
+ lazyEvaluation = newValue;
+ return this;
+ }
+
+ /**
+ * Configure the validator of the model parameters.
+ *
+ * @param newValidator Parameter validator.
+ * @return this object.
+ *
+ * @since 3.4
+ */
+ public LeastSquaresBuilder parameterValidator(final ParameterValidator newValidator) {
+ paramValidator = newValidator;
+ return this;
+ }
+}
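
A usage sketch for the builder (the LineFitExample class and its data are hypothetical, and Java 8 lambdas are assumed for brevity): a straight-line model y = a*x + b is assembled from separate value and Jacobian functions, then solved with the LevenbergMarquardtOptimizer defined later in this patch.

import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
import org.apache.commons.math3.analysis.MultivariateVectorFunction;
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresBuilder;
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresOptimizer.Optimum;
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem;
import org.apache.commons.math3.fitting.leastsquares.LevenbergMarquardtOptimizer;

public final class LineFitExample {
    public static void main(String[] args) {
        // Hypothetical observations, roughly following y = 2x + 1.
        final double[] x = {0, 1, 2, 3};
        final double[] y = {1.1, 2.9, 5.2, 7.1};

        // Model value: f_i(a, b) = a * x_i + b.
        final MultivariateVectorFunction value = params -> {
            final double[] out = new double[x.length];
            for (int i = 0; i < x.length; i++) {
                out[i] = params[0] * x[i] + params[1];
            }
            return out;
        };
        // Jacobian: df_i/da = x_i, df_i/db = 1.
        final MultivariateMatrixFunction jacobian = params -> {
            final double[][] jac = new double[x.length][2];
            for (int i = 0; i < x.length; i++) {
                jac[i][0] = x[i];
                jac[i][1] = 1;
            }
            return jac;
        };

        final LeastSquaresProblem problem = new LeastSquaresBuilder()
                .model(value, jacobian)
                .target(y)
                .start(new double[] {1, 1})
                .maxEvaluations(100)
                .maxIterations(100)
                .build();

        final Optimum optimum = new LevenbergMarquardtOptimizer().optimize(problem);
        System.out.println("slope     = " + optimum.getPoint().getEntry(0));
        System.out.println("intercept = " + optimum.getPoint().getEntry(1));
        System.out.println("RMS       = " + optimum.getRMS());
    }
}
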
diff --git a/src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresFactory.java b/src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresFactory.java
new file mode 100644
index 0000000..42cdf89
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresFactory.java
@@ -0,0 +1,532 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting.leastsquares;
+
+import org.apache.commons.math3.exception.MathIllegalStateException;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
+import org.apache.commons.math3.analysis.MultivariateVectorFunction;
+import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem.Evaluation;
+import org.apache.commons.math3.linear.Array2DRowRealMatrix;
+import org.apache.commons.math3.linear.ArrayRealVector;
+import org.apache.commons.math3.linear.DiagonalMatrix;
+import org.apache.commons.math3.linear.EigenDecomposition;
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.linear.RealVector;
+import org.apache.commons.math3.optim.AbstractOptimizationProblem;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.optim.PointVectorValuePair;
+import org.apache.commons.math3.util.FastMath;
+import org.apache.commons.math3.util.Incrementor;
+import org.apache.commons.math3.util.Pair;
+
+/**
+ * A Factory for creating {@link LeastSquaresProblem}s.
+ *
+ * @since 3.3
+ */
+public class LeastSquaresFactory {
+
+ /** Prevent instantiation. */
+ private LeastSquaresFactory() {}
+
+ /**
+ * Create a {@link org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem}
+ * from the given elements. If {@code weight} is {@code null}, unit weights are assumed.
+ *
+ * @param model the model function. Produces the computed values.
+ * @param observed the observed (target) values
+ * @param start the initial guess.
+ * @param weight the weight matrix
+ * @param checker convergence checker
+ * @param maxEvaluations the maximum number of times to evaluate the model
+ * @param maxIterations the maximum number of times to iterate in the algorithm
+ * @param lazyEvaluation Whether the call to {@link LeastSquaresProblem#evaluate(RealVector)}
+ * will defer the evaluation until access to the value is requested.
+ * @param paramValidator Model parameters validator.
+ * @return the specified General Least Squares problem.
+ *
+ * @since 3.4
+ */
+ public static LeastSquaresProblem create(final MultivariateJacobianFunction model,
+ final RealVector observed,
+ final RealVector start,
+ final RealMatrix weight,
+ final ConvergenceChecker<Evaluation> checker,
+ final int maxEvaluations,
+ final int maxIterations,
+ final boolean lazyEvaluation,
+ final ParameterValidator paramValidator) {
+ final LeastSquaresProblem p = new LocalLeastSquaresProblem(model,
+ observed,
+ start,
+ checker,
+ maxEvaluations,
+ maxIterations,
+ lazyEvaluation,
+ paramValidator);
+ if (weight != null) {
+ return weightMatrix(p, weight);
+ } else {
+ return p;
+ }
+ }
+
+ /**
+ * Create a {@link org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem}
+ * from the given elements. There will be no weights applied (unit weights).
+ *
+ * @param model the model function. Produces the computed values.
+ * @param observed the observed (target) values
+ * @param start the initial guess.
+ * @param checker convergence checker
+ * @param maxEvaluations the maximum number of times to evaluate the model
+ * @param maxIterations the maximum number of times to iterate in the algorithm
+ * @return the specified General Least Squares problem.
+ */
+ public static LeastSquaresProblem create(final MultivariateJacobianFunction model,
+ final RealVector observed,
+ final RealVector start,
+ final ConvergenceChecker<Evaluation> checker,
+ final int maxEvaluations,
+ final int maxIterations) {
+ return create(model,
+ observed,
+ start,
+ null,
+ checker,
+ maxEvaluations,
+ maxIterations,
+ false,
+ null);
+ }
+
+ /**
+ * Create a {@link org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem}
+ * from the given elements.
+ *
+ * @param model the model function. Produces the computed values.
+ * @param observed the observed (target) values
+ * @param start the initial guess.
+ * @param weight the weight matrix
+ * @param checker convergence checker
+ * @param maxEvaluations the maximum number of times to evaluate the model
+ * @param maxIterations the maximum number of times to iterate in the algorithm
+ * @return the specified General Least Squares problem.
+ */
+ public static LeastSquaresProblem create(final MultivariateJacobianFunction model,
+ final RealVector observed,
+ final RealVector start,
+ final RealMatrix weight,
+ final ConvergenceChecker<Evaluation> checker,
+ final int maxEvaluations,
+ final int maxIterations) {
+ return weightMatrix(create(model,
+ observed,
+ start,
+ checker,
+ maxEvaluations,
+ maxIterations),
+ weight);
+ }
+
+ /**
+ * Create a {@link org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem}
+ * from the given elements.
+ * <p>
+ * This factory method is provided for continuity with previous interfaces. Newer
+ * applications should use {@link #create(MultivariateJacobianFunction, RealVector,
+ * RealVector, ConvergenceChecker, int, int)}, or {@link #create(MultivariateJacobianFunction,
+ * RealVector, RealVector, RealMatrix, ConvergenceChecker, int, int)}.
+ *
+ * @param model the model function. Produces the computed values.
+ * @param jacobian the jacobian of the model with respect to the parameters
+ * @param observed the observed (target) values
+ * @param start the initial guess.
+ * @param weight the weight matrix
+ * @param checker convergence checker
+ * @param maxEvaluations the maximum number of times to evaluate the model
+ * @param maxIterations the maximum number of times to iterate in the algorithm
+ * @return the specified General Least Squares problem.
+ */
+ public static LeastSquaresProblem create(final MultivariateVectorFunction model,
+ final MultivariateMatrixFunction jacobian,
+ final double[] observed,
+ final double[] start,
+ final RealMatrix weight,
+ final ConvergenceChecker<Evaluation> checker,
+ final int maxEvaluations,
+ final int maxIterations) {
+ return create(model(model, jacobian),
+ new ArrayRealVector(observed, false),
+ new ArrayRealVector(start, false),
+ weight,
+ checker,
+ maxEvaluations,
+ maxIterations);
+ }
+
+ /**
+ * Apply a dense weight matrix to the {@link LeastSquaresProblem}.
+ *
+ * @param problem the unweighted problem
+ * @param weights the matrix of weights
+ * @return a new {@link LeastSquaresProblem} with the weights applied. The original
+ * {@code problem} is not modified.
+ */
+ public static LeastSquaresProblem weightMatrix(final LeastSquaresProblem problem,
+ final RealMatrix weights) {
+ final RealMatrix weightSquareRoot = squareRoot(weights);
+ return new LeastSquaresAdapter(problem) {
+ /** {@inheritDoc} */
+ @Override
+ public Evaluation evaluate(final RealVector point) {
+ return new DenseWeightedEvaluation(super.evaluate(point), weightSquareRoot);
+ }
+ };
+ }
+
+ /**
+ * Apply a diagonal weight matrix to the {@link LeastSquaresProblem}.
+ *
+ * @param problem the unweighted problem
+ * @param weights the diagonal of the weight matrix
+ * @return a new {@link LeastSquaresProblem} with the weights applied. The original
+ * {@code problem} is not modified.
+ */
+ public static LeastSquaresProblem weightDiagonal(final LeastSquaresProblem problem,
+ final RealVector weights) {
+ // TODO more efficient implementation
+ return weightMatrix(problem, new DiagonalMatrix(weights.toArray()));
+ }
+
+ /**
+ * Count the evaluations of a particular problem. The {@code counter} will be
+ * incremented every time {@link LeastSquaresProblem#evaluate(RealVector)} is called on
+ * the <em>returned</em> problem.
+ *
+ * @param problem the problem to track.
+ * @param counter the counter to increment.
+ * @return a least squares problem that tracks evaluations
+ */
+ public static LeastSquaresProblem countEvaluations(final LeastSquaresProblem problem,
+ final Incrementor counter) {
+ return new LeastSquaresAdapter(problem) {
+
+ /** {@inheritDoc} */
+ @Override
+ public Evaluation evaluate(final RealVector point) {
+ counter.incrementCount();
+ return super.evaluate(point);
+ }
+
+ // Delegate the rest.
+ };
+ }
+
+ /**
+ * View a convergence checker specified for a {@link PointVectorValuePair} as one
+ * specified for an {@link Evaluation}.
+ *
+ * @param checker the convergence checker to adapt.
+ * @return a convergence checker that delegates to {@code checker}.
+ */
+ public static ConvergenceChecker<Evaluation> evaluationChecker(final ConvergenceChecker<PointVectorValuePair> checker) {
+ return new ConvergenceChecker<Evaluation>() {
+ /** {@inheritDoc} */
+ public boolean converged(final int iteration,
+ final Evaluation previous,
+ final Evaluation current) {
+ return checker.converged(
+ iteration,
+ new PointVectorValuePair(
+ previous.getPoint().toArray(),
+ previous.getResiduals().toArray(),
+ false),
+ new PointVectorValuePair(
+ current.getPoint().toArray(),
+ current.getResiduals().toArray(),
+ false)
+ );
+ }
+ };
+ }
+
+ /**
+ * Computes the square-root of the weight matrix.
+ *
+ * @param m Symmetric, positive-definite (weight) matrix.
+ * @return the square-root of the weight matrix.
+ */
+ private static RealMatrix squareRoot(final RealMatrix m) {
+ if (m instanceof DiagonalMatrix) {
+ final int dim = m.getRowDimension();
+ final RealMatrix sqrtM = new DiagonalMatrix(dim);
+ for (int i = 0; i < dim; i++) {
+ sqrtM.setEntry(i, i, FastMath.sqrt(m.getEntry(i, i)));
+ }
+ return sqrtM;
+ } else {
+ final EigenDecomposition dec = new EigenDecomposition(m);
+ return dec.getSquareRoot();
+ }
+ }
+
+ /**
+ * Combine a {@link MultivariateVectorFunction} with a {@link
+ * MultivariateMatrixFunction} to produce a {@link MultivariateJacobianFunction}.
+ *
+ * @param value the vector value function
+ * @param jacobian the Jacobian function
+ * @return a function that computes both at the same time
+ */
+ public static MultivariateJacobianFunction model(final MultivariateVectorFunction value,
+ final MultivariateMatrixFunction jacobian) {
+ return new LocalValueAndJacobianFunction(value, jacobian);
+ }
+
+ /**
+ * Combine a {@link MultivariateVectorFunction} with a {@link
+ * MultivariateMatrixFunction} to produce a {@link MultivariateJacobianFunction}.
+ *
+ * @param value the vector value function
+ * @param jacobian the Jacobian function
+ * @return a function that computes both at the same time
+ */
+ private static class LocalValueAndJacobianFunction
+ implements ValueAndJacobianFunction {
+ /** Model. */
+ private final MultivariateVectorFunction value;
+ /** Model's Jacobian. */
+ private final MultivariateMatrixFunction jacobian;
+
+ /**
+ * @param value Model function.
+ * @param jacobian Model's Jacobian function.
+ */
+ LocalValueAndJacobianFunction(final MultivariateVectorFunction value,
+ final MultivariateMatrixFunction jacobian) {
+ this.value = value;
+ this.jacobian = jacobian;
+ }
+
+ /** {@inheritDoc} */
+ public Pair<RealVector, RealMatrix> value(final RealVector point) {
+ //TODO get array from RealVector without copying?
+ final double[] p = point.toArray();
+
+ // Evaluate.
+ return new Pair<RealVector, RealMatrix>(computeValue(p),
+ computeJacobian(p));
+ }
+
+ /** {@inheritDoc} */
+ public RealVector computeValue(final double[] params) {
+ return new ArrayRealVector(value.value(params), false);
+ }
+
+ /** {@inheritDoc} */
+ public RealMatrix computeJacobian(final double[] params) {
+ return new Array2DRowRealMatrix(jacobian.value(params), false);
+ }
+ }
+
+
+ /**
+ * A private, "field" immutable (not "real" immutable) implementation of {@link
+ * LeastSquaresProblem}.
+ * @since 3.3
+ */
+ private static class LocalLeastSquaresProblem
+ extends AbstractOptimizationProblem<Evaluation>
+ implements LeastSquaresProblem {
+
+ /** Target values for the model function at optimum. */
+ private final RealVector target;
+ /** Model function. */
+ private final MultivariateJacobianFunction model;
+ /** Initial guess. */
+ private final RealVector start;
+ /** Whether to use lazy evaluation. */
+ private final boolean lazyEvaluation;
+ /** Model parameters validator. */
+ private final ParameterValidator paramValidator;
+
+ /**
+ * Create a {@link LeastSquaresProblem} from the given data.
+ *
+ * @param model the model function
+ * @param target the observed data
+ * @param start the initial guess
+ * @param checker the convergence checker
+ * @param maxEvaluations the allowed evaluations
+ * @param maxIterations the allowed iterations
+ * @param lazyEvaluation Whether the call to {@link LeastSquaresProblem#evaluate(RealVector)}
+ * will defer the evaluation until access to the value is requested.
+ * @param paramValidator Model parameters validator.
+ */
+ LocalLeastSquaresProblem(final MultivariateJacobianFunction model,
+ final RealVector target,
+ final RealVector start,
+ final ConvergenceChecker<Evaluation> checker,
+ final int maxEvaluations,
+ final int maxIterations,
+ final boolean lazyEvaluation,
+ final ParameterValidator paramValidator) {
+ super(maxEvaluations, maxIterations, checker);
+ this.target = target;
+ this.model = model;
+ this.start = start;
+ this.lazyEvaluation = lazyEvaluation;
+ this.paramValidator = paramValidator;
+
+ if (lazyEvaluation &&
+ !(model instanceof ValueAndJacobianFunction)) {
+ // Lazy evaluation requires that value and Jacobian
+ // can be computed separately.
+ throw new MathIllegalStateException(LocalizedFormats.INVALID_IMPLEMENTATION,
+ model.getClass().getName());
+ }
+ }
+
+ /** {@inheritDoc} */
+ public int getObservationSize() {
+ return target.getDimension();
+ }
+
+ /** {@inheritDoc} */
+ public int getParameterSize() {
+ return start.getDimension();
+ }
+
+ /** {@inheritDoc} */
+ public RealVector getStart() {
+ return start == null ? null : start.copy();
+ }
+
+ /** {@inheritDoc} */
+ public Evaluation evaluate(final RealVector point) {
+ // Copy so optimizer can change point without changing our instance.
+ final RealVector p = paramValidator == null ?
+ point.copy() :
+ paramValidator.validate(point.copy());
+
+ if (lazyEvaluation) {
+ return new LazyUnweightedEvaluation((ValueAndJacobianFunction) model,
+ target,
+ p);
+ } else {
+ // Evaluate value and jacobian in one function call.
+ final Pair<RealVector, RealMatrix> value = model.value(p);
+ return new UnweightedEvaluation(value.getFirst(),
+ value.getSecond(),
+ target,
+ p);
+ }
+ }
+
+ /**
+ * Container with the model evaluation at a particular point.
+ */
+ private static class UnweightedEvaluation extends AbstractEvaluation {
+ /** Point of evaluation. */
+ private final RealVector point;
+ /** Derivative at point. */
+ private final RealMatrix jacobian;
+ /** Computed residuals. */
+ private final RealVector residuals;
+
+ /**
+ * Create an {@link Evaluation} with no weights.
+ *
+ * @param values the computed function values
+ * @param jacobian the computed function Jacobian
+ * @param target the observed values
+ * @param point the abscissa
+ */
+ private UnweightedEvaluation(final RealVector values,
+ final RealMatrix jacobian,
+ final RealVector target,
+ final RealVector point) {
+ super(target.getDimension());
+ this.jacobian = jacobian;
+ this.point = point;
+ this.residuals = target.subtract(values);
+ }
+
+ /** {@inheritDoc} */
+ public RealMatrix getJacobian() {
+ return jacobian;
+ }
+
+ /** {@inheritDoc} */
+ public RealVector getPoint() {
+ return point;
+ }
+
+ /** {@inheritDoc} */
+ public RealVector getResiduals() {
+ return residuals;
+ }
+ }
+
+ /**
+ * Container with the model <em>lazy</em> evaluation at a particular point.
+ */
+ private static class LazyUnweightedEvaluation extends AbstractEvaluation {
+ /** Point of evaluation. */
+ private final RealVector point;
+ /** Model and Jacobian functions. */
+ private final ValueAndJacobianFunction model;
+ /** Target values for the model function at optimum. */
+ private final RealVector target;
+
+ /**
+ * Create an {@link Evaluation} with no weights.
+ *
+ * @param model the model function
+ * @param target the observed values
+ * @param point the abscissa
+ */
+ private LazyUnweightedEvaluation(final ValueAndJacobianFunction model,
+ final RealVector target,
+ final RealVector point) {
+ super(target.getDimension());
+ // Safe to cast as long as we control usage of this class.
+ this.model = model;
+ this.point = point;
+ this.target = target;
+ }
+
+ /** {@inheritDoc} */
+ public RealMatrix getJacobian() {
+ return model.computeJacobian(point.toArray());
+ }
+
+ /** {@inheritDoc} */
+ public RealVector getPoint() {
+ return point;
+ }
+
+ /** {@inheritDoc} */
+ public RealVector getResiduals() {
+ return target.subtract(model.computeValue(point.toArray()));
+ }
+ }
+ }
+}
+
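A factory-based counterpart to the builder sketch above (the WeightedProblemExample helper is hypothetical): the six-argument create(...) builds an unweighted problem, weightDiagonal(...) attaches per-observation weights, and a null checker leaves convergence to the optimizer's built-in criteria, which the Levenberg-Marquardt implementation below supports.

import org.apache.commons.math3.analysis.MultivariateMatrixFunction;
import org.apache.commons.math3.analysis.MultivariateVectorFunction;
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresFactory;
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem;
import org.apache.commons.math3.linear.ArrayRealVector;

public final class WeightedProblemExample {
    private WeightedProblemExample() {}

    /** Assemble a diagonally weighted problem from caller-supplied functions and data. */
    public static LeastSquaresProblem weightedProblem(final MultivariateVectorFunction value,
                                                      final MultivariateMatrixFunction jacobian,
                                                      final double[] observed,
                                                      final double[] start,
                                                      final double[] weights) {
        final LeastSquaresProblem unweighted = LeastSquaresFactory.create(
                LeastSquaresFactory.model(value, jacobian),
                new ArrayRealVector(observed, false),
                new ArrayRealVector(start, false),
                null,    // no checker: rely on the optimizer's default convergence criteria
                1000,    // max evaluations
                1000);   // max iterations
        return LeastSquaresFactory.weightDiagonal(unweighted,
                                                  new ArrayRealVector(weights, false));
    }
}
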
diff --git a/src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresOptimizer.java b/src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresOptimizer.java
new file mode 100644
index 0000000..50d5b8a
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresOptimizer.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting.leastsquares;
+
+/**
+ * An algorithm that can be applied to a non-linear least squares problem.
+ *
+ * @since 3.3
+ */
+public interface LeastSquaresOptimizer {
+
+ /**
+ * Solve the non-linear least squares problem.
+ *
+ *
+ * @param leastSquaresProblem the problem definition, including model function and
+ * convergence criteria.
+ * @return The optimum.
+ */
+ Optimum optimize(LeastSquaresProblem leastSquaresProblem);
+
+ /**
+ * The optimum found by the optimizer. This object contains the point, its value, and
+ * some metadata.
+ */
+ //TODO Solution?
+ interface Optimum extends LeastSquaresProblem.Evaluation {
+
+ /**
+ * Get the number of times the model was evaluated in order to produce this
+ * optimum.
+ *
+ * @return the number of model (objective) function evaluations
+ */
+ int getEvaluations();
+
+ /**
+ * Get the number of times the algorithm iterated in order to produce this
+ * optimum. In general least squares it is common to have one {@link
+ * #getEvaluations() evaluation} per iteration.
+ *
+ * @return the number of iterations
+ */
+ int getIterations();
+
+ }
+
+}
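
A sketch of how this interface is consumed (the SolveExample helper is hypothetical): a concrete optimizer such as the LevenbergMarquardtOptimizer below is tuned through its immutable with* copies, and the returned Optimum carries the solution together with the evaluation and iteration counts.

import org.apache.commons.math3.fitting.leastsquares.LeastSquaresOptimizer;
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresOptimizer.Optimum;
import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem;
import org.apache.commons.math3.fitting.leastsquares.LevenbergMarquardtOptimizer;
import org.apache.commons.math3.linear.RealVector;

public final class SolveExample {
    private SolveExample() {}

    /** Solve a prepared problem and report how much work the optimizer did. */
    public static RealVector solve(final LeastSquaresProblem problem) {
        final LeastSquaresOptimizer optimizer = new LevenbergMarquardtOptimizer()
                .withCostRelativeTolerance(1e-12)
                .withParameterRelativeTolerance(1e-12);
        final Optimum optimum = optimizer.optimize(problem);
        System.out.println("iterations  = " + optimum.getIterations());
        System.out.println("evaluations = " + optimum.getEvaluations());
        return optimum.getPoint();
    }
}
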
diff --git a/src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresProblem.java b/src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresProblem.java
new file mode 100644
index 0000000..097ff81
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/leastsquares/LeastSquaresProblem.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting.leastsquares;
+
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.linear.RealVector;
+import org.apache.commons.math3.optim.OptimizationProblem;
+
+/**
+ * The data necessary to define a non-linear least squares problem.
+ * <p>
+ * Includes the observed values, computed model function, and
+ * convergence/divergence criteria. Weights are implicit in {@link
+ * Evaluation#getResiduals()} and {@link Evaluation#getJacobian()}.
+ * </p>
+ * <p>
+ * Instances are typically either created progressively using a {@link
+ * LeastSquaresBuilder builder} or created at once using a {@link LeastSquaresFactory
+ * factory}.
+ * </p>
+ * @see LeastSquaresBuilder
+ * @see LeastSquaresFactory
+ * @see LeastSquaresAdapter
+ *
+ * @since 3.3
+ */
+public interface LeastSquaresProblem extends OptimizationProblem<LeastSquaresProblem.Evaluation> {
+
+ /**
+ * Gets the initial guess.
+ *
+ * @return the initial guess values.
+ */
+ RealVector getStart();
+
+ /**
+ * Get the number of observations (rows in the Jacobian) in this problem.
+ *
+ * @return the number of scalar observations
+ */
+ int getObservationSize();
+
+ /**
+ * Get the number of parameters (columns in the Jacobian) in this problem.
+ *
+ * @return the number of scalar parameters
+ */
+ int getParameterSize();
+
+ /**
+ * Evaluate the model at the specified point.
+ *
+ *
+ * @param point the parameter values.
+ * @return the model's value and derivative at the given point.
+ * @throws org.apache.commons.math3.exception.TooManyEvaluationsException
+ * if the maximal number of evaluations (of the model vector function) is
+ * exceeded.
+ */
+ Evaluation evaluate(RealVector point);
+
+ /**
+ * An evaluation of a {@link LeastSquaresProblem} at a particular point. This class
+ * also computes several quantities derived from the value and its Jacobian.
+ */
+ public interface Evaluation {
+
+ /**
+ * Get the covariance matrix of the optimized parameters. <br/> Note that this
+ * operation involves the inversion of the <code>J<sup>T</sup>J</code> matrix,
+ * where {@code J} is the Jacobian matrix. The {@code threshold} parameter is a
+ * way for the caller to specify that the result of this computation should be
+ * considered meaningless, and thus trigger an exception.
+ *
+ *
+ * @param threshold Singularity threshold.
+ * @return the covariance matrix.
+ * @throws org.apache.commons.math3.linear.SingularMatrixException
+ * if the covariance matrix cannot be computed (singular problem).
+ */
+ RealMatrix getCovariances(double threshold);
+
+ /**
+ * Get an estimate of the standard deviation of the parameters. The returned
+ * values are the square root of the diagonal coefficients of the covariance
+ * matrix, {@code sd(a[i]) ~= sqrt(C[i][i])}, where {@code a[i]} is the optimized
+ * value of the {@code i}-th parameter, and {@code C} is the covariance matrix.
+ *
+ *
+ * @param covarianceSingularityThreshold Singularity threshold (see {@link
+ * #getCovariances(double) computeCovariances}).
+ * @return an estimate of the standard deviation of the optimized parameters
+ * @throws org.apache.commons.math3.linear.SingularMatrixException
+ * if the covariance matrix cannot be computed.
+ */
+ RealVector getSigma(double covarianceSingularityThreshold);
+
+ /**
+ * Get the normalized cost, i.e. the square root of the sum of squared residuals
+ * divided by the number of measurements (the root-mean-square of the weighted residuals).
+ *
+ * @return the cost.
+ */
+ double getRMS();
+
+ /**
+ * Get the weighted Jacobian matrix.
+ *
+ * @return the weighted Jacobian: W<sup>1/2</sup> J.
+ * @throws org.apache.commons.math3.exception.DimensionMismatchException
+ * if the Jacobian dimension does not match problem dimension.
+ */
+ RealMatrix getJacobian();
+
+ /**
+ * Get the cost.
+ *
+ * @return the cost.
+ * @see #getResiduals()
+ */
+ double getCost();
+
+ /**
+ * Get the weighted residuals. The residual is the difference between the
+ * observed (target) values and the model (objective function) value. There is one
+ * residual for each element of the vector-valued function. The raw residuals are
+ * then multiplied by the square root of the weight matrix.
+ *
+ * @return the weighted residuals: W<sup>1/2</sup> (observed - computed).
+ * @throws org.apache.commons.math3.exception.DimensionMismatchException
+ * if the residuals have the wrong length.
+ */
+ RealVector getResiduals();
+
+ /**
+ * Get the abscissa (independent variables) of this evaluation.
+ *
+ * @return the point provided to {@link #evaluate(RealVector)}.
+ */
+ RealVector getPoint();
+ }
+}
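
The derived quantities exposed by Evaluation are typically read off the Optimum returned by an optimizer; a small sketch (the FitReport helper and the 1e-14 singularity threshold are illustrative choices, not library defaults):

import java.util.Arrays;

import org.apache.commons.math3.fitting.leastsquares.LeastSquaresOptimizer.Optimum;
import org.apache.commons.math3.linear.RealMatrix;

public final class FitReport {
    private FitReport() {}

    /** Print the quantities an Evaluation derives from the model value and Jacobian. */
    public static void report(final Optimum optimum) {
        final double threshold = 1e-14;  // illustrative singularity threshold
        final RealMatrix covariances = optimum.getCovariances(threshold);
        System.out.println("cost  = " + optimum.getCost());
        System.out.println("RMS   = " + optimum.getRMS());
        System.out.println("sigma = " + Arrays.toString(optimum.getSigma(threshold).toArray()));
        System.out.println("covar = " + Arrays.deepToString(covariances.getData()));
    }
}
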
diff --git a/src/main/java/org/apache/commons/math3/fitting/leastsquares/LevenbergMarquardtOptimizer.java b/src/main/java/org/apache/commons/math3/fitting/leastsquares/LevenbergMarquardtOptimizer.java
new file mode 100644
index 0000000..358d240
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/leastsquares/LevenbergMarquardtOptimizer.java
@@ -0,0 +1,1042 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting.leastsquares;
+
+import java.util.Arrays;
+
+import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem.Evaluation;
+import org.apache.commons.math3.linear.ArrayRealVector;
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.exception.ConvergenceException;
+import org.apache.commons.math3.exception.util.LocalizedFormats;
+import org.apache.commons.math3.optim.ConvergenceChecker;
+import org.apache.commons.math3.util.Incrementor;
+import org.apache.commons.math3.util.Precision;
+import org.apache.commons.math3.util.FastMath;
+
+
+/**
+ * This class solves a least-squares problem using the Levenberg-Marquardt
+ * algorithm.
+ *
+ * <p>This implementation <em>should</em> work even for over-determined systems
+ * (i.e. systems having more points than unknowns). Over-determined systems
+ * are solved by ignoring the points which have the smallest impact according
+ * to their jacobian column norm. Only the rank of the matrix and some loop bounds
+ * are changed to implement this.</p>
+ *
+ * <p>The resolution engine is a simple translation of the MINPACK <a
+ * href="http://www.netlib.org/minpack/lmder.f">lmder</a> routine with minor
+ * changes. The changes include the over-determined resolution, the use of
+ * inherited convergence checker and the Q.R. decomposition which has been
+ * rewritten following the algorithm described in the
+ * P. Lascaux and R. Theodor book <i>Analyse num&eacute;rique matricielle
+ * appliqu&eacute;e &agrave; l'art de l'ing&eacute;nieur</i>, Masson 1986.</p>
+ * <p>The authors of the original fortran version are:
+ * <ul>
+ * <li>Argonne National Laboratory. MINPACK project. March 1980</li>
+ * <li>Burton S. Garbow</li>
+ * <li>Kenneth E. Hillstrom</li>
+ * <li>Jorge J. More</li>
+ * </ul>
+ * The redistribution policy for MINPACK is available <a
+ * href="http://www.netlib.org/minpack/disclaimer">here</a>, for convenience, it
+ * is reproduced below.</p>
+ *
+ * <table border="0" width="80%" cellpadding="10" align="center" bgcolor="#E0E0E0">
+ * <tr><td>
+ * Minpack Copyright Notice (1999) University of Chicago.
+ * All rights reserved
+ * </td></tr>
+ * <tr><td>
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * <ol>
+ * <li>Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.</li>
+ * <li>Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.</li>
+ * <li>The end-user documentation included with the redistribution, if any,
+ * must include the following acknowledgment:
+ * <code>This product includes software developed by the University of
+ * Chicago, as Operator of Argonne National Laboratory.</code>
+ * Alternately, this acknowledgment may appear in the software itself,
+ * if and wherever such third-party acknowledgments normally appear.</li>
+ * <li><strong>WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND. THE COPYRIGHT HOLDER, THE
+ * UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND
+ * THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE
+ * OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY
+ * OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR
+ * USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF
+ * THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4)
+ * DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION
+ * UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL
+ * BE CORRECTED.</strong></li>
+ * <li><strong>LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT
+ * HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF
+ * ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT,
+ * INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF
+ * ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF
+ * PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER
+ * SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT
+ * (INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE,
+ * EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE
+ * POSSIBILITY OF SUCH LOSS OR DAMAGES.</strong></li>
+ * </ol></td></tr>
+ * </table>
+ *
+ * @since 3.3
+ */
+public class LevenbergMarquardtOptimizer implements LeastSquaresOptimizer {
+
+ /** Twice the "epsilon machine". */
+ private static final double TWO_EPS = 2 * Precision.EPSILON;
+
+ /* configuration parameters */
+ /** Positive input variable used in determining the initial step bound. */
+ private final double initialStepBoundFactor;
+ /** Desired relative error in the sum of squares. */
+ private final double costRelativeTolerance;
+ /** Desired relative error in the approximate solution parameters. */
+ private final double parRelativeTolerance;
+ /** Desired max cosine on the orthogonality between the function vector
+ * and the columns of the jacobian. */
+ private final double orthoTolerance;
+ /** Threshold for QR ranking. */
+ private final double qrRankingThreshold;
+
+ /** Default constructor.
+ * <p>
+ * The default values for the algorithm settings are:
+ * <ul>
+ * <li>Initial step bound factor: 100</li>
+ * <li>Cost relative tolerance: 1e-10</li>
+ * <li>Parameters relative tolerance: 1e-10</li>
+ * <li>Orthogonality tolerance: 1e-10</li>
+ * <li>QR ranking threshold: {@link Precision#SAFE_MIN}</li>
+ * </ul>
+ */
+ public LevenbergMarquardtOptimizer() {
+ this(100, 1e-10, 1e-10, 1e-10, Precision.SAFE_MIN);
+ }
+
+ /**
+ * Construct an instance with all parameters specified.
+ *
+ * @param initialStepBoundFactor initial step bound factor
+ * @param costRelativeTolerance cost relative tolerance
+ * @param parRelativeTolerance parameters relative tolerance
+ * @param orthoTolerance orthogonality tolerance
+ * @param qrRankingThreshold threshold in the QR decomposition. Columns with a 2
+ * norm less than this threshold are considered to be
+ * all 0s.
+ */
+ public LevenbergMarquardtOptimizer(
+ final double initialStepBoundFactor,
+ final double costRelativeTolerance,
+ final double parRelativeTolerance,
+ final double orthoTolerance,
+ final double qrRankingThreshold) {
+ this.initialStepBoundFactor = initialStepBoundFactor;
+ this.costRelativeTolerance = costRelativeTolerance;
+ this.parRelativeTolerance = parRelativeTolerance;
+ this.orthoTolerance = orthoTolerance;
+ this.qrRankingThreshold = qrRankingThreshold;
+ }
+
+ /**
+ * @param newInitialStepBoundFactor Positive input variable used in
+ * determining the initial step bound. This bound is set to the
+ * product of initialStepBoundFactor and the euclidean norm of
+ * {@code diag * x} if non-zero, or else to {@code newInitialStepBoundFactor}
+ * itself. In most cases factor should lie in the interval
+ * {@code (0.1, 100.0)}. {@code 100} is a generally recommended value.
+ * @return a new instance.
+ */
+ public LevenbergMarquardtOptimizer withInitialStepBoundFactor(double newInitialStepBoundFactor) {
+ return new LevenbergMarquardtOptimizer(
+ newInitialStepBoundFactor,
+ costRelativeTolerance,
+ parRelativeTolerance,
+ orthoTolerance,
+ qrRankingThreshold);
+ }
+
+ /**
+ * @param newCostRelativeTolerance Desired relative error in the sum of squares.
+ * @return a new instance.
+ */
+ public LevenbergMarquardtOptimizer withCostRelativeTolerance(double newCostRelativeTolerance) {
+ return new LevenbergMarquardtOptimizer(
+ initialStepBoundFactor,
+ newCostRelativeTolerance,
+ parRelativeTolerance,
+ orthoTolerance,
+ qrRankingThreshold);
+ }
+
+ /**
+ * @param newParRelativeTolerance Desired relative error in the approximate solution
+ * parameters.
+ * @return a new instance.
+ */
+ public LevenbergMarquardtOptimizer withParameterRelativeTolerance(double newParRelativeTolerance) {
+ return new LevenbergMarquardtOptimizer(
+ initialStepBoundFactor,
+ costRelativeTolerance,
+ newParRelativeTolerance,
+ orthoTolerance,
+ qrRankingThreshold);
+ }
+
+ /**
+ * Builds a new instance with the specified orthogonality tolerance.
+ *
+ * @param newOrthoTolerance Desired max cosine on the orthogonality between
+ * the function vector and the columns of the Jacobian.
+ * @return a new instance.
+ */
+ public LevenbergMarquardtOptimizer withOrthoTolerance(double newOrthoTolerance) {
+ return new LevenbergMarquardtOptimizer(
+ initialStepBoundFactor,
+ costRelativeTolerance,
+ parRelativeTolerance,
+ newOrthoTolerance,
+ qrRankingThreshold);
+ }
+
+ /**
+ * @param newQRRankingThreshold Desired threshold for QR ranking.
+ * If the squared norm of a column vector is smaller or equal to this
+ * threshold during QR decomposition, it is considered to be a zero vector
+ * and hence the rank of the matrix is reduced.
+ * @return a new instance.
+ */
+ public LevenbergMarquardtOptimizer withRankingThreshold(double newQRRankingThreshold) {
+ return new LevenbergMarquardtOptimizer(
+ initialStepBoundFactor,
+ costRelativeTolerance,
+ parRelativeTolerance,
+ orthoTolerance,
+ newQRRankingThreshold);
+ }
+
+ /**
+ * Gets the value of a tuning parameter.
+ * @see #withInitialStepBoundFactor(double)
+ *
+ * @return the parameter's value.
+ */
+ public double getInitialStepBoundFactor() {
+ return initialStepBoundFactor;
+ }
+
+ /**
+ * Gets the value of a tuning parameter.
+ * @see #withCostRelativeTolerance(double)
+ *
+ * @return the parameter's value.
+ */
+ public double getCostRelativeTolerance() {
+ return costRelativeTolerance;
+ }
+
+ /**
+ * Gets the value of a tuning parameter.
+ * @see #withParameterRelativeTolerance(double)
+ *
+ * @return the parameter's value.
+ */
+ public double getParameterRelativeTolerance() {
+ return parRelativeTolerance;
+ }
+
+ /**
+ * Gets the value of a tuning parameter.
+ * @see #withOrthoTolerance(double)
+ *
+ * @return the parameter's value.
+ */
+ public double getOrthoTolerance() {
+ return orthoTolerance;
+ }
+
+ /**
+ * Gets the value of a tuning parameter.
+ * @see #withRankingThreshold(double)
+ *
+ * @return the parameter's value.
+ */
+ public double getRankingThreshold() {
+ return qrRankingThreshold;
+ }
+
+ /** {@inheritDoc} */
+ public Optimum optimize(final LeastSquaresProblem problem) {
+ // Pull in relevant data from the problem as locals.
+ final int nR = problem.getObservationSize(); // Number of observed data.
+ final int nC = problem.getParameterSize(); // Number of parameters.
+ // Counters.
+ final Incrementor iterationCounter = problem.getIterationCounter();
+ final Incrementor evaluationCounter = problem.getEvaluationCounter();
+ // Convergence criterion.
+ final ConvergenceChecker<Evaluation> checker = problem.getConvergenceChecker();
+
+ // arrays shared with the other private methods
+ final int solvedCols = FastMath.min(nR, nC);
+ /* Parameters evolution direction associated with lmPar. */
+ double[] lmDir = new double[nC];
+ /* Levenberg-Marquardt parameter. */
+ double lmPar = 0;
+
+ // local point
+ double delta = 0;
+ double xNorm = 0;
+ double[] diag = new double[nC];
+ double[] oldX = new double[nC];
+ double[] oldRes = new double[nR];
+ double[] qtf = new double[nR];
+ double[] work1 = new double[nC];
+ double[] work2 = new double[nC];
+ double[] work3 = new double[nC];
+
+
+ // Evaluate the function at the starting point and calculate its norm.
+ evaluationCounter.incrementCount();
+ //value will be reassigned in the loop
+ Evaluation current = problem.evaluate(problem.getStart());
+ double[] currentResiduals = current.getResiduals().toArray();
+ double currentCost = current.getCost();
+ double[] currentPoint = current.getPoint().toArray();
+
+ // Outer loop.
+ boolean firstIteration = true;
+ while (true) {
+ iterationCounter.incrementCount();
+
+ final Evaluation previous = current;
+
+ // QR decomposition of the jacobian matrix
+ final InternalData internalData
+ = qrDecomposition(current.getJacobian(), solvedCols);
+ final double[][] weightedJacobian = internalData.weightedJacobian;
+ final int[] permutation = internalData.permutation;
+ final double[] diagR = internalData.diagR;
+ final double[] jacNorm = internalData.jacNorm;
+
+ //residuals already have weights applied
+ double[] weightedResidual = currentResiduals;
+ for (int i = 0; i < nR; i++) {
+ qtf[i] = weightedResidual[i];
+ }
+
+ // compute Qt.res
+ qTy(qtf, internalData);
+
+ // now we don't need Q anymore,
+ // so let jacobian contain the R matrix with its diagonal elements
+ for (int k = 0; k < solvedCols; ++k) {
+ int pk = permutation[k];
+ weightedJacobian[k][pk] = diagR[pk];
+ }
+
+ if (firstIteration) {
+ // scale the point according to the norms of the columns
+ // of the initial jacobian
+ xNorm = 0;
+ for (int k = 0; k < nC; ++k) {
+ double dk = jacNorm[k];
+ if (dk == 0) {
+ dk = 1.0;
+ }
+ double xk = dk * currentPoint[k];
+ xNorm += xk * xk;
+ diag[k] = dk;
+ }
+ xNorm = FastMath.sqrt(xNorm);
+
+ // initialize the step bound delta
+ delta = (xNorm == 0) ? initialStepBoundFactor : (initialStepBoundFactor * xNorm);
+ }
+
+ // check orthogonality between function vector and jacobian columns
+ double maxCosine = 0;
+ if (currentCost != 0) {
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double s = jacNorm[pj];
+ if (s != 0) {
+ double sum = 0;
+ for (int i = 0; i <= j; ++i) {
+ sum += weightedJacobian[i][pj] * qtf[i];
+ }
+ maxCosine = FastMath.max(maxCosine, FastMath.abs(sum) / (s * currentCost));
+ }
+ }
+ }
+ if (maxCosine <= orthoTolerance) {
+ // Convergence has been reached.
+ return new OptimumImpl(
+ current,
+ evaluationCounter.getCount(),
+ iterationCounter.getCount());
+ }
+
+ // rescale if necessary
+ for (int j = 0; j < nC; ++j) {
+ diag[j] = FastMath.max(diag[j], jacNorm[j]);
+ }
+
+ // Inner loop.
+ for (double ratio = 0; ratio < 1.0e-4;) {
+
+ // save the state
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ oldX[pj] = currentPoint[pj];
+ }
+ final double previousCost = currentCost;
+ double[] tmpVec = weightedResidual;
+ weightedResidual = oldRes;
+ oldRes = tmpVec;
+
+ // determine the Levenberg-Marquardt parameter
+ lmPar = determineLMParameter(qtf, delta, diag,
+ internalData, solvedCols,
+ work1, work2, work3, lmDir, lmPar);
+
+ // compute the new point and the norm of the evolution direction
+ double lmNorm = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ lmDir[pj] = -lmDir[pj];
+ currentPoint[pj] = oldX[pj] + lmDir[pj];
+ double s = diag[pj] * lmDir[pj];
+ lmNorm += s * s;
+ }
+ lmNorm = FastMath.sqrt(lmNorm);
+ // on the first iteration, adjust the initial step bound.
+ if (firstIteration) {
+ delta = FastMath.min(delta, lmNorm);
+ }
+
+ // Evaluate the function at x + p and calculate its norm.
+ evaluationCounter.incrementCount();
+ current = problem.evaluate(new ArrayRealVector(currentPoint));
+ currentResiduals = current.getResiduals().toArray();
+ currentCost = current.getCost();
+ currentPoint = current.getPoint().toArray();
+
+ // compute the scaled actual reduction
+ double actRed = -1.0;
+ if (0.1 * currentCost < previousCost) {
+ double r = currentCost / previousCost;
+ actRed = 1.0 - r * r;
+ }
+
+ // compute the scaled predicted reduction
+ // and the scaled directional derivative
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double dirJ = lmDir[pj];
+ work1[j] = 0;
+ for (int i = 0; i <= j; ++i) {
+ work1[i] += weightedJacobian[i][pj] * dirJ;
+ }
+ }
+ double coeff1 = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ coeff1 += work1[j] * work1[j];
+ }
+ double pc2 = previousCost * previousCost;
+ coeff1 /= pc2;
+ double coeff2 = lmPar * lmNorm * lmNorm / pc2;
+ double preRed = coeff1 + 2 * coeff2;
+ double dirDer = -(coeff1 + coeff2);
+
+ // ratio of the actual to the predicted reduction
+ ratio = (preRed == 0) ? 0 : (actRed / preRed);
+
+ // update the step bound
+ if (ratio <= 0.25) {
+ double tmp =
+ (actRed < 0) ? (0.5 * dirDer / (dirDer + 0.5 * actRed)) : 0.5;
+ if ((0.1 * currentCost >= previousCost) || (tmp < 0.1)) {
+ tmp = 0.1;
+ }
+ delta = tmp * FastMath.min(delta, 10.0 * lmNorm);
+ lmPar /= tmp;
+ } else if ((lmPar == 0) || (ratio >= 0.75)) {
+ delta = 2 * lmNorm;
+ lmPar *= 0.5;
+ }
+
+ // test for successful iteration.
+ if (ratio >= 1.0e-4) {
+ // successful iteration, update the norm
+ firstIteration = false;
+ xNorm = 0;
+ for (int k = 0; k < nC; ++k) {
+ double xK = diag[k] * currentPoint[k];
+ xNorm += xK * xK;
+ }
+ xNorm = FastMath.sqrt(xNorm);
+
+ // tests for convergence.
+ if (checker != null && checker.converged(iterationCounter.getCount(), previous, current)) {
+ return new OptimumImpl(current, evaluationCounter.getCount(), iterationCounter.getCount());
+ }
+ } else {
+ // failed iteration, reset the previous values
+ currentCost = previousCost;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ currentPoint[pj] = oldX[pj];
+ }
+ tmpVec = weightedResidual;
+ weightedResidual = oldRes;
+ oldRes = tmpVec;
+ // Reset "current" to previous values.
+ current = previous;
+ }
+
+ // Default convergence criteria.
+ if ((FastMath.abs(actRed) <= costRelativeTolerance &&
+ preRed <= costRelativeTolerance &&
+ ratio <= 2.0) ||
+ delta <= parRelativeTolerance * xNorm) {
+ return new OptimumImpl(current, evaluationCounter.getCount(), iterationCounter.getCount());
+ }
+
+ // tests for termination and stringent tolerances
+ if (FastMath.abs(actRed) <= TWO_EPS &&
+ preRed <= TWO_EPS &&
+ ratio <= 2.0) {
+ throw new ConvergenceException(LocalizedFormats.TOO_SMALL_COST_RELATIVE_TOLERANCE,
+ costRelativeTolerance);
+ } else if (delta <= TWO_EPS * xNorm) {
+ throw new ConvergenceException(LocalizedFormats.TOO_SMALL_PARAMETERS_RELATIVE_TOLERANCE,
+ parRelativeTolerance);
+ } else if (maxCosine <= TWO_EPS) {
+ throw new ConvergenceException(LocalizedFormats.TOO_SMALL_ORTHOGONALITY_TOLERANCE,
+ orthoTolerance);
+ }
+ }
+ }
+ }
+
+ /**
+ * Holds internal data.
+ * This structure was created so that all optimizer fields can be "final".
+ * Code should be further refactored in order to not pass around arguments
+ * that will be modified in-place (cf. "work" arrays).
+ */
+ private static class InternalData {
+ /** Weighted Jacobian. */
+ private final double[][] weightedJacobian;
+ /** Columns permutation array. */
+ private final int[] permutation;
+ /** Rank of the Jacobian matrix. */
+ private final int rank;
+ /** Diagonal elements of the R matrix in the QR decomposition. */
+ private final double[] diagR;
+ /** Norms of the columns of the jacobian matrix. */
+ private final double[] jacNorm;
+ /** Coefficients of the Householder transforms vectors. */
+ private final double[] beta;
+
+ /**
+ * @param weightedJacobian Weighted Jacobian.
+ * @param permutation Columns permutation array.
+ * @param rank Rank of the Jacobian matrix.
+ * @param diagR Diagonal elements of the R matrix in the QR decomposition.
+ * @param jacNorm Norms of the columns of the jacobian matrix.
+ * @param beta Coefficients of the Householder transforms vectors.
+ */
+ InternalData(double[][] weightedJacobian,
+ int[] permutation,
+ int rank,
+ double[] diagR,
+ double[] jacNorm,
+ double[] beta) {
+ this.weightedJacobian = weightedJacobian;
+ this.permutation = permutation;
+ this.rank = rank;
+ this.diagR = diagR;
+ this.jacNorm = jacNorm;
+ this.beta = beta;
+ }
+ }
+
+ /**
+ * Determines the Levenberg-Marquardt parameter.
+ *
+ * <p>This implementation is a translation in Java of the MINPACK
+ * <a href="http://www.netlib.org/minpack/lmpar.f">lmpar</a>
+ * routine.</p>
+ * <p>This method sets the lmPar and lmDir attributes.</p>
+ * <p>The authors of the original fortran function are:</p>
+ * <ul>
+ * <li>Argonne National Laboratory. MINPACK project. March 1980</li>
+ * <li>Burton S. Garbow</li>
+ * <li>Kenneth E. Hillstrom</li>
+ * <li>Jorge J. More</li>
+ * </ul>
+ * <p>Luc Maisonobe did the Java translation.</p>
+ *
+ * @param qy Array containing qTy.
+ * @param delta Upper bound on the euclidean norm of diagR * lmDir.
+ * @param diag Diagonal matrix.
+ * @param internalData Data (modified in-place in this method).
+ * @param solvedCols Number of solved columns.
+ * @param work1 work array
+ * @param work2 work array
+ * @param work3 work array
+ * @param lmDir the "returned" LM direction will be stored in this array.
+ * @param lmPar the value of the LM parameter from the previous iteration.
+ * @return the new LM parameter
+ */
+ private double determineLMParameter(double[] qy, double delta, double[] diag,
+ InternalData internalData, int solvedCols,
+ double[] work1, double[] work2, double[] work3,
+ double[] lmDir, double lmPar) {
+ final double[][] weightedJacobian = internalData.weightedJacobian;
+ final int[] permutation = internalData.permutation;
+ final int rank = internalData.rank;
+ final double[] diagR = internalData.diagR;
+
+ final int nC = weightedJacobian[0].length;
+
+ // compute and store in x the gauss-newton direction, if the
+ // jacobian is rank-deficient, obtain a least squares solution
+ for (int j = 0; j < rank; ++j) {
+ lmDir[permutation[j]] = qy[j];
+ }
+ for (int j = rank; j < nC; ++j) {
+ lmDir[permutation[j]] = 0;
+ }
+ for (int k = rank - 1; k >= 0; --k) {
+ int pk = permutation[k];
+ double ypk = lmDir[pk] / diagR[pk];
+ for (int i = 0; i < k; ++i) {
+ lmDir[permutation[i]] -= ypk * weightedJacobian[i][pk];
+ }
+ lmDir[pk] = ypk;
+ }
+
+ // evaluate the function at the origin, and test
+ // for acceptance of the Gauss-Newton direction
+ double dxNorm = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double s = diag[pj] * lmDir[pj];
+ work1[pj] = s;
+ dxNorm += s * s;
+ }
+ dxNorm = FastMath.sqrt(dxNorm);
+ double fp = dxNorm - delta;
+ if (fp <= 0.1 * delta) {
+ lmPar = 0;
+ return lmPar;
+ }
+
+ // if the jacobian is not rank deficient, the Newton step provides
+ // a lower bound, parl, for the zero of the function,
+ // otherwise set this bound to zero
+ double sum2;
+ double parl = 0;
+ if (rank == solvedCols) {
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ work1[pj] *= diag[pj] / dxNorm;
+ }
+ sum2 = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double sum = 0;
+ for (int i = 0; i < j; ++i) {
+ sum += weightedJacobian[i][pj] * work1[permutation[i]];
+ }
+ double s = (work1[pj] - sum) / diagR[pj];
+ work1[pj] = s;
+ sum2 += s * s;
+ }
+ parl = fp / (delta * sum2);
+ }
+
+ // calculate an upper bound, paru, for the zero of the function
+ sum2 = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double sum = 0;
+ for (int i = 0; i <= j; ++i) {
+ sum += weightedJacobian[i][pj] * qy[i];
+ }
+ sum /= diag[pj];
+ sum2 += sum * sum;
+ }
+ double gNorm = FastMath.sqrt(sum2);
+ double paru = gNorm / delta;
+ if (paru == 0) {
+ paru = Precision.SAFE_MIN / FastMath.min(delta, 0.1);
+ }
+
+ // if the input par lies outside of the interval (parl,paru),
+ // set par to the closer endpoint
+ lmPar = FastMath.min(paru, FastMath.max(lmPar, parl));
+ if (lmPar == 0) {
+ lmPar = gNorm / dxNorm;
+ }
+
+ for (int countdown = 10; countdown >= 0; --countdown) {
+
+ // evaluate the function at the current value of lmPar
+ if (lmPar == 0) {
+ lmPar = FastMath.max(Precision.SAFE_MIN, 0.001 * paru);
+ }
+ double sPar = FastMath.sqrt(lmPar);
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ work1[pj] = sPar * diag[pj];
+ }
+ determineLMDirection(qy, work1, work2, internalData, solvedCols, work3, lmDir);
+
+ dxNorm = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ double s = diag[pj] * lmDir[pj];
+ work3[pj] = s;
+ dxNorm += s * s;
+ }
+ dxNorm = FastMath.sqrt(dxNorm);
+ double previousFP = fp;
+ fp = dxNorm - delta;
+
+ // if the function is small enough, accept the current value
+ // of lmPar, also test for the exceptional cases where parl is zero
+ if (FastMath.abs(fp) <= 0.1 * delta ||
+ (parl == 0 &&
+ fp <= previousFP &&
+ previousFP < 0)) {
+ return lmPar;
+ }
+
+ // compute the Newton correction
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ work1[pj] = work3[pj] * diag[pj] / dxNorm;
+ }
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ work1[pj] /= work2[j];
+ double tmp = work1[pj];
+ for (int i = j + 1; i < solvedCols; ++i) {
+ work1[permutation[i]] -= weightedJacobian[i][pj] * tmp;
+ }
+ }
+ sum2 = 0;
+ for (int j = 0; j < solvedCols; ++j) {
+ double s = work1[permutation[j]];
+ sum2 += s * s;
+ }
+ double correction = fp / (delta * sum2);
+
+ // depending on the sign of the function, update parl or paru.
+ if (fp > 0) {
+ parl = FastMath.max(parl, lmPar);
+ } else if (fp < 0) {
+ paru = FastMath.min(paru, lmPar);
+ }
+
+ // compute an improved estimate for lmPar
+ lmPar = FastMath.max(parl, lmPar + correction);
+ }
+
+ return lmPar;
+ }
+
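The loop above is the safeguarded Newton iteration of Moré (1978) for the Levenberg-Marquardt parameter. Up to the sign and weighting conventions used in this class, it seeks the root of the scalar function

    \( \phi(\lambda) = \lVert D\,x(\lambda)\rVert - \Delta, \qquad (J^T J + \lambda D^2)\,x(\lambda) = -J^T r \)

where fp plays the role of \( \phi(\lambda) \), dxNorm is \( \lVert D\,x(\lambda)\rVert \), delta is the trust-region radius \( \Delta \), correction is the Newton step for \( \lambda \), and parl/paru bracket the root. The symbols \( J \), \( D \), \( r \) and \( \Delta \) are editorial shorthand for the weighted Jacobian, the scaling diagonal, the residuals and the trust-region radius, not identifiers from the source.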
+ /**
+ * Solve a*x = b and d*x = 0 in the least squares sense.
+ * <p>This implementation is a translation in Java of the MINPACK
+ * <a href="http://www.netlib.org/minpack/qrsolv.f">qrsolv</a>
+ * routine.</p>
+ * <p>This method sets the lmDir and lmDiag attributes.</p>
+ * <p>The authors of the original fortran function are:</p>
+ * <ul>
+ * <li>Argonne National Laboratory. MINPACK project. March 1980</li>
+ * <li>Burton S. Garbow</li>
+ * <li>Kenneth E. Hillstrom</li>
+ * <li>Jorge J. More</li>
+ * </ul>
+ * <p>Luc Maisonobe did the Java translation.</p>
+ *
+ * @param qy array containing qTy
+ * @param diag diagonal matrix
+ * @param lmDiag diagonal elements associated with lmDir
+ * @param internalData Data (modified in-place in this method).
+ * @param solvedCols Number of solved points.
+ * @param work work array
+ * @param lmDir the "returned" LM direction is stored in this array
+ */
+ private void determineLMDirection(double[] qy, double[] diag,
+ double[] lmDiag,
+ InternalData internalData,
+ int solvedCols,
+ double[] work,
+ double[] lmDir) {
+ final int[] permutation = internalData.permutation;
+ final double[][] weightedJacobian = internalData.weightedJacobian;
+ final double[] diagR = internalData.diagR;
+
+ // copy R and Qty to preserve input and initialize s
+ // in particular, save the diagonal elements of R in lmDir
+ for (int j = 0; j < solvedCols; ++j) {
+ int pj = permutation[j];
+ for (int i = j + 1; i < solvedCols; ++i) {
+ weightedJacobian[i][pj] = weightedJacobian[j][permutation[i]];
+ }
+ lmDir[j] = diagR[pj];
+ work[j] = qy[j];
+ }
+
+ // eliminate the diagonal matrix d using a Givens rotation
+ for (int j = 0; j < solvedCols; ++j) {
+
+ // prepare the row of d to be eliminated, locating the
+ // diagonal element using p from the Q.R. factorization
+ int pj = permutation[j];
+ double dpj = diag[pj];
+ if (dpj != 0) {
+ Arrays.fill(lmDiag, j + 1, lmDiag.length, 0);
+ }
+ lmDiag[j] = dpj;
+
+ // the transformations to eliminate the row of d
+ // modify only a single element of Qty
+ // beyond the first n, which is initially zero.
+ double qtbpj = 0;
+ for (int k = j; k < solvedCols; ++k) {
+ int pk = permutation[k];
+
+ // determine a Givens rotation which eliminates the
+ // appropriate element in the current row of d
+ if (lmDiag[k] != 0) {
+
+ final double sin;
+ final double cos;
+ double rkk = weightedJacobian[k][pk];
+ if (FastMath.abs(rkk) < FastMath.abs(lmDiag[k])) {
+ final double cotan = rkk / lmDiag[k];
+ sin = 1.0 / FastMath.sqrt(1.0 + cotan * cotan);
+ cos = sin * cotan;
+ } else {
+ final double tan = lmDiag[k] / rkk;
+ cos = 1.0 / FastMath.sqrt(1.0 + tan * tan);
+ sin = cos * tan;
+ }
+
+ // compute the modified diagonal element of R and
+ // the modified element of (Qty,0)
+ weightedJacobian[k][pk] = cos * rkk + sin * lmDiag[k];
+ final double temp = cos * work[k] + sin * qtbpj;
+ qtbpj = -sin * work[k] + cos * qtbpj;
+ work[k] = temp;
+
+ // accumulate the transformation in the row of s
+ for (int i = k + 1; i < solvedCols; ++i) {
+ double rik = weightedJacobian[i][pk];
+ final double temp2 = cos * rik + sin * lmDiag[i];
+ lmDiag[i] = -sin * rik + cos * lmDiag[i];
+ weightedJacobian[i][pk] = temp2;
+ }
+ }
+ }
+
+ // store the diagonal element of s and restore
+ // the corresponding diagonal element of R
+ lmDiag[j] = weightedJacobian[j][permutation[j]];
+ weightedJacobian[j][permutation[j]] = lmDir[j];
+ }
+
+ // solve the triangular system for z, if the system is
+ // singular, then obtain a least squares solution
+ int nSing = solvedCols;
+ for (int j = 0; j < solvedCols; ++j) {
+ if ((lmDiag[j] == 0) && (nSing == solvedCols)) {
+ nSing = j;
+ }
+ if (nSing < solvedCols) {
+ work[j] = 0;
+ }
+ }
+ if (nSing > 0) {
+ for (int j = nSing - 1; j >= 0; --j) {
+ int pj = permutation[j];
+ double sum = 0;
+ for (int i = j + 1; i < nSing; ++i) {
+ sum += weightedJacobian[i][pj] * work[i];
+ }
+ work[j] = (work[j] - sum) / lmDiag[j];
+ }
+ }
+
+ // permute the components of z back to components of lmDir
+ for (int j = 0; j < lmDir.length; ++j) {
+ lmDir[permutation[j]] = work[j];
+ }
+ }
+
+ /**
+ * Decompose a matrix A as A.P = Q.R using Householder transforms.
+ * <p>As suggested in the P. Lascaux and R. Theodor book
+ * <i>Analyse num&eacute;rique matricielle appliqu&eacute;e &agrave;
+ * l'art de l'ing&eacute;nieur</i> (Masson, 1986), instead of representing
+ * the Householder transforms with u<sub>k</sub> unit vectors such that:
+ * <pre>
+ * H<sub>k</sub> = I - 2u<sub>k</sub>.u<sub>k</sub><sup>t</sup>
+ * </pre>
+ * we use v<sub>k</sub> non-unit vectors such that:
+ * <pre>
+ * H<sub>k</sub> = I - beta<sub>k</sub>v<sub>k</sub>.v<sub>k</sub><sup>t</sup>
+ * </pre>
+ * where v<sub>k</sub> = a<sub>k</sub> - alpha<sub>k</sub> e<sub>k</sub>.
+ * The beta<sub>k</sub> coefficients are provided upon exit as recomputing
+ * them from the v<sub>k</sub> vectors would be costly.</p>
+ * <p>This decomposition handles rank-deficient cases since the transformations
+ * are performed in order of non-increasing column norms, thanks to column
+ * pivoting. The diagonal elements of the R matrix are therefore also in
+ * order of non-increasing absolute values.</p>
+ *
+ * @param jacobian Weighted Jacobian matrix at the current point.
+ * @param solvedCols Number of solved points.
+ * @return data used in other methods of this class.
+ * @throws ConvergenceException if the decomposition cannot be performed.
+ */
+ private InternalData qrDecomposition(RealMatrix jacobian,
+ int solvedCols) throws ConvergenceException {
+ // Code in this class assumes that the weighted Jacobian is -(W^(1/2) J),
+ // hence the multiplication by -1.
+ final double[][] weightedJacobian = jacobian.scalarMultiply(-1).getData();
+
+ final int nR = weightedJacobian.length;
+ final int nC = weightedJacobian[0].length;
+
+ final int[] permutation = new int[nC];
+ final double[] diagR = new double[nC];
+ final double[] jacNorm = new double[nC];
+ final double[] beta = new double[nC];
+
+ // initializations
+ for (int k = 0; k < nC; ++k) {
+ permutation[k] = k;
+ double norm2 = 0;
+ for (int i = 0; i < nR; ++i) {
+ double akk = weightedJacobian[i][k];
+ norm2 += akk * akk;
+ }
+ jacNorm[k] = FastMath.sqrt(norm2);
+ }
+
+ // transform the matrix column after column
+ for (int k = 0; k < nC; ++k) {
+
+ // select the column with the greatest norm on active components
+ int nextColumn = -1;
+ double ak2 = Double.NEGATIVE_INFINITY;
+ for (int i = k; i < nC; ++i) {
+ double norm2 = 0;
+ for (int j = k; j < nR; ++j) {
+ double aki = weightedJacobian[j][permutation[i]];
+ norm2 += aki * aki;
+ }
+ if (Double.isInfinite(norm2) || Double.isNaN(norm2)) {
+ throw new ConvergenceException(LocalizedFormats.UNABLE_TO_PERFORM_QR_DECOMPOSITION_ON_JACOBIAN,
+ nR, nC);
+ }
+ if (norm2 > ak2) {
+ nextColumn = i;
+ ak2 = norm2;
+ }
+ }
+ if (ak2 <= qrRankingThreshold) {
+ return new InternalData(weightedJacobian, permutation, k, diagR, jacNorm, beta);
+ }
+ int pk = permutation[nextColumn];
+ permutation[nextColumn] = permutation[k];
+ permutation[k] = pk;
+
+ // choose alpha such that Hk.u = alpha ek
+ double akk = weightedJacobian[k][pk];
+ double alpha = (akk > 0) ? -FastMath.sqrt(ak2) : FastMath.sqrt(ak2);
+ double betak = 1.0 / (ak2 - akk * alpha);
+ beta[pk] = betak;
+
+ // transform the current column
+ diagR[pk] = alpha;
+ weightedJacobian[k][pk] -= alpha;
+
+ // transform the remaining columns
+ for (int dk = nC - 1 - k; dk > 0; --dk) {
+ double gamma = 0;
+ for (int j = k; j < nR; ++j) {
+ gamma += weightedJacobian[j][pk] * weightedJacobian[j][permutation[k + dk]];
+ }
+ gamma *= betak;
+ for (int j = k; j < nR; ++j) {
+ weightedJacobian[j][permutation[k + dk]] -= gamma * weightedJacobian[j][pk];
+ }
+ }
+ }
+
+ return new InternalData(weightedJacobian, permutation, solvedCols, diagR, jacNorm, beta);
+ }
+
+ /**
+ * Compute the product Qt.y for some Q.R. decomposition.
+ *
+ * @param y vector to multiply (will be overwritten with the result)
+ * @param internalData Data.
+ */
+ private void qTy(double[] y,
+ InternalData internalData) {
+ final double[][] weightedJacobian = internalData.weightedJacobian;
+ final int[] permutation = internalData.permutation;
+ final double[] beta = internalData.beta;
+
+ final int nR = weightedJacobian.length;
+ final int nC = weightedJacobian[0].length;
+
+ for (int k = 0; k < nC; ++k) {
+ int pk = permutation[k];
+ double gamma = 0;
+ for (int i = k; i < nR; ++i) {
+ gamma += weightedJacobian[i][pk] * y[i];
+ }
+ gamma *= beta[pk];
+ for (int i = k; i < nR; ++i) {
+ y[i] -= gamma * weightedJacobian[i][pk];
+ }
+ }
+ }
+}
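qrDecomposition above stores each Householder transform implicitly, as a coefficient beta[pk] together with the reflector vector left in the transformed column, and qTy then applies H<sub>k</sub> = I - beta<sub>k</sub>v<sub>k</sub>.v<sub>k</sub><sup>t</sup> to y one pivoted column at a time. A minimal standalone sketch of that single-reflector update, with hypothetical names rather than the fields of the class above:

    // Apply one Householder reflector H = I - beta * v * v^T to y,
    // touching only the active components k..n-1, as qTy does per column.
    static void applyReflector(double[] y, double[] v, double beta, int k) {
        double gamma = 0;
        for (int i = k; i < y.length; ++i) {
            gamma += v[i] * y[i];           // gamma = v^T y on the active rows
        }
        gamma *= beta;
        for (int i = k; i < y.length; ++i) {
            y[i] -= gamma * v[i];           // y <- y - beta * (v^T y) * v
        }
    }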
diff --git a/src/main/java/org/apache/commons/math3/fitting/leastsquares/MultivariateJacobianFunction.java b/src/main/java/org/apache/commons/math3/fitting/leastsquares/MultivariateJacobianFunction.java
new file mode 100644
index 0000000..e673855
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/leastsquares/MultivariateJacobianFunction.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting.leastsquares;
+
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.linear.RealVector;
+import org.apache.commons.math3.util.Pair;
+
+/**
+ * An interface for functions that compute a vector of values and can compute their
+ * derivatives (Jacobian).
+ *
+ * @since 3.3
+ */
+public interface MultivariateJacobianFunction {
+
+ /**
+ * Compute the function value and its Jacobian.
+ *
+ * @param point the point (parameter values) at which to evaluate the function
+ * @return the value and the Jacobian of this vector-valued function at the given point.
+ */
+ Pair<RealVector, RealMatrix> value(RealVector point);
+
+}
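To show how this interface is typically implemented, here is a hedged sketch for a two-parameter model a*exp(b*x) evaluated at fixed abscissae; the model and class name are illustrative assumptions, not part of this commit:

    import org.apache.commons.math3.fitting.leastsquares.MultivariateJacobianFunction;
    import org.apache.commons.math3.linear.Array2DRowRealMatrix;
    import org.apache.commons.math3.linear.ArrayRealVector;
    import org.apache.commons.math3.linear.RealMatrix;
    import org.apache.commons.math3.linear.RealVector;
    import org.apache.commons.math3.util.Pair;

    /** Model values a*exp(b*x_i) and their Jacobian with respect to (a, b). */
    class ExponentialModel implements MultivariateJacobianFunction {
        private final double[] x; // observation abscissae

        ExponentialModel(double[] x) {
            this.x = x;
        }

        public Pair<RealVector, RealMatrix> value(RealVector point) {
            final double a = point.getEntry(0);
            final double b = point.getEntry(1);
            final double[] values = new double[x.length];
            final double[][] jacobian = new double[x.length][2];
            for (int i = 0; i < x.length; ++i) {
                final double e = Math.exp(b * x[i]);
                values[i] = a * e;
                jacobian[i][0] = e;            // partial derivative w.r.t. a
                jacobian[i][1] = a * x[i] * e; // partial derivative w.r.t. b
            }
            return new Pair<RealVector, RealMatrix>(new ArrayRealVector(values, false),
                                                    new Array2DRowRealMatrix(jacobian, false));
        }
    }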
diff --git a/src/main/java/org/apache/commons/math3/fitting/leastsquares/OptimumImpl.java b/src/main/java/org/apache/commons/math3/fitting/leastsquares/OptimumImpl.java
new file mode 100644
index 0000000..698f86c
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/leastsquares/OptimumImpl.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting.leastsquares;
+
+import org.apache.commons.math3.fitting.leastsquares.LeastSquaresOptimizer.Optimum;
+import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem.Evaluation;
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.linear.RealVector;
+
+/**
+ * A pedantic implementation of {@link Optimum}.
+ *
+ * @since 3.3
+ */
+class OptimumImpl implements Optimum {
+
+ /** the evaluation (point, residuals, Jacobian) at the optimum */
+ private final Evaluation value;
+ /** number of evaluations to compute this optimum */
+ private final int evaluations;
+ /** number of iterations to compute this optimum */
+ private final int iterations;
+
+ /**
+ * Construct an optimum from an evaluation and the values of the counters.
+ *
+ * @param value the function value
+ * @param evaluations number of times the function was evaluated
+ * @param iterations number of iterations of the algorithm
+ */
+ OptimumImpl(final Evaluation value, final int evaluations, final int iterations) {
+ this.value = value;
+ this.evaluations = evaluations;
+ this.iterations = iterations;
+ }
+
+ /* auto-generated implementations */
+
+ /** {@inheritDoc} */
+ public int getEvaluations() {
+ return evaluations;
+ }
+
+ /** {@inheritDoc} */
+ public int getIterations() {
+ return iterations;
+ }
+
+ /** {@inheritDoc} */
+ public RealMatrix getCovariances(double threshold) {
+ return value.getCovariances(threshold);
+ }
+
+ /** {@inheritDoc} */
+ public RealVector getSigma(double covarianceSingularityThreshold) {
+ return value.getSigma(covarianceSingularityThreshold);
+ }
+
+ /** {@inheritDoc} */
+ public double getRMS() {
+ return value.getRMS();
+ }
+
+ /** {@inheritDoc} */
+ public RealMatrix getJacobian() {
+ return value.getJacobian();
+ }
+
+ /** {@inheritDoc} */
+ public double getCost() {
+ return value.getCost();
+ }
+
+ /** {@inheritDoc} */
+ public RealVector getResiduals() {
+ return value.getResiduals();
+ }
+
+ /** {@inheritDoc} */
+ public RealVector getPoint() {
+ return value.getPoint();
+ }
+}
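An Optimum is normally obtained from a LeastSquaresOptimizer and then queried for the solution and its diagnostics. A hedged usage sketch, where the problem variable is assumed to be a LeastSquaresProblem built elsewhere (for instance as in the builder sketch further below) and the threshold values are illustrative:

    LeastSquaresOptimizer.Optimum optimum =
            new LevenbergMarquardtOptimizer().optimize(problem);
    RealVector solution = optimum.getPoint();               // fitted parameters
    double rms = optimum.getRMS();                          // root-mean-square residual
    RealVector sigma = optimum.getSigma(1e-10);             // parameter standard errors
    RealMatrix covariance = optimum.getCovariances(1e-10);  // parameter covariance matrix
    System.out.println("evaluations: " + optimum.getEvaluations()
            + ", iterations: " + optimum.getIterations());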
diff --git a/src/main/java/org/apache/commons/math3/fitting/leastsquares/ParameterValidator.java b/src/main/java/org/apache/commons/math3/fitting/leastsquares/ParameterValidator.java
new file mode 100644
index 0000000..d5b8529
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/leastsquares/ParameterValidator.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting.leastsquares;
+
+import org.apache.commons.math3.linear.RealVector;
+
+/**
+ * Interface for validating a set of model parameters.
+ *
+ * @since 3.4
+ */
+public interface ParameterValidator {
+ /**
+ * Validates the set of parameters.
+ *
+ * @param params Input parameters.
+ * @return the validated values.
+ */
+ RealVector validate(RealVector params);
+}
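A hedged sketch of a validator that projects every parameter onto [0, +inf); such a validator can be attached to a problem (for instance through the LeastSquaresBuilder), so that the optimizer only evaluates the model at validated points:

    import org.apache.commons.math3.fitting.leastsquares.ParameterValidator;
    import org.apache.commons.math3.linear.RealVector;

    /** Clamps every parameter to be non-negative before the model is evaluated. */
    class NonNegativeValidator implements ParameterValidator {
        public RealVector validate(RealVector params) {
            final RealVector v = params.copy();
            for (int i = 0; i < v.getDimension(); ++i) {
                if (v.getEntry(i) < 0) {
                    v.setEntry(i, 0); // project onto the feasible half-line
                }
            }
            return v;
        }
    }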
diff --git a/src/main/java/org/apache/commons/math3/fitting/leastsquares/ValueAndJacobianFunction.java b/src/main/java/org/apache/commons/math3/fitting/leastsquares/ValueAndJacobianFunction.java
new file mode 100644
index 0000000..180e328
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/leastsquares/ValueAndJacobianFunction.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.commons.math3.fitting.leastsquares;
+
+import org.apache.commons.math3.linear.RealMatrix;
+import org.apache.commons.math3.linear.RealVector;
+
+/**
+ * An interface for functions that compute a vector of values and can compute their
+ * derivatives (Jacobian).
+ *
+ * @since 3.4
+ */
+public interface ValueAndJacobianFunction extends MultivariateJacobianFunction {
+ /**
+ * Compute the value.
+ *
+ * @param params Point.
+ * @return the value at the given point.
+ */
+ RealVector computeValue(final double[] params);
+
+ /**
+ * Compute the Jacobian.
+ *
+ * @param params Point.
+ * @return the Jacobian at the given point.
+ */
+ RealMatrix computeJacobian(final double[] params);
+}
diff --git a/src/main/java/org/apache/commons/math3/fitting/leastsquares/package-info.java b/src/main/java/org/apache/commons/math3/fitting/leastsquares/package-info.java
new file mode 100644
index 0000000..98623b5
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/leastsquares/package-info.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package provides algorithms that minimize the residuals
+ * between observations and model values.
+ * The {@link org.apache.commons.math3.fitting.leastsquares.LeastSquaresOptimizer
+ * least-squares optimizers} minimize the distance (called
+ * <em>cost</em> or <em>&chi;<sup>2</sup></em>) between model and
+ * observations.
+ *
+ * <br/>
+ * Algorithms in this category need access to a <em>problem</em>
+ * (represented by a {@link org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem
+ * LeastSquaresProblem}).
+ * Such a problem provides a model that predicts a set of values, which the
+ * algorithm tries to match with a given set of observed values.
+ * <br/>
+ * The problem can be created progressively using a {@link
+ * org.apache.commons.math3.fitting.leastsquares.LeastSquaresBuilder builder} or it can
+ * be created at once using a {@link org.apache.commons.math3.fitting.leastsquares.LeastSquaresFactory
+ * factory}.
+ * @since 3.3
+ */
+package org.apache.commons.math3.fitting.leastsquares;
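A hedged end-to-end sketch of the builder route described above, reusing the illustrative ExponentialModel from the MultivariateJacobianFunction sketch; the observed data and the initial guess are made up for the example:

    import org.apache.commons.math3.fitting.leastsquares.LeastSquaresBuilder;
    import org.apache.commons.math3.fitting.leastsquares.LeastSquaresOptimizer.Optimum;
    import org.apache.commons.math3.fitting.leastsquares.LeastSquaresProblem;
    import org.apache.commons.math3.fitting.leastsquares.LevenbergMarquardtOptimizer;

    double[] x = {0, 1, 2, 3, 4};
    double[] y = {1.0, 1.4, 2.1, 2.9, 4.2};       // observed values to reproduce

    LeastSquaresProblem problem = new LeastSquaresBuilder()
            .model(new ExponentialModel(x))        // MultivariateJacobianFunction sketched earlier
            .target(y)                             // observed values
            .start(new double[] {1, 0.1})          // initial guess for (a, b)
            .maxEvaluations(1000)
            .maxIterations(100)
            .build();

    Optimum optimum = new LevenbergMarquardtOptimizer().optimize(problem);
    double[] bestFit = optimum.getPoint().toArray();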
diff --git a/src/main/java/org/apache/commons/math3/fitting/package-info.java b/src/main/java/org/apache/commons/math3/fitting/package-info.java
new file mode 100644
index 0000000..af00a6a
--- /dev/null
+++ b/src/main/java/org/apache/commons/math3/fitting/package-info.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Classes to perform curve fitting.
+ *
+ * <p>Curve fitting is a special case of a least-squares problem where the parameters are the
+ * coefficients of a function \( f \) whose graph \( y = f(x) \) should pass through sample points,
+ * and where the objective function is the sum of the squared residuals \( f(x_i) - y_i \) for
+ * observed points \( (x_i, y_i) \).
+ */
+package org.apache.commons.math3.fitting;
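As a hedged illustration of the curve-fitting entry points in this package, the sketch below collects observations in a WeightedObservedPoints instance and fits a quadratic with PolynomialCurveFitter; the sample values are made up:

    import org.apache.commons.math3.fitting.PolynomialCurveFitter;
    import org.apache.commons.math3.fitting.WeightedObservedPoints;

    WeightedObservedPoints obs = new WeightedObservedPoints();
    obs.add(-1.0, 2.0);   // (x, y) sample points, unit weight
    obs.add( 0.0, 1.1);
    obs.add( 1.0, 1.9);
    obs.add( 2.0, 5.2);

    // Fit the coefficients of a quadratic y = c0 + c1*x + c2*x^2.
    PolynomialCurveFitter fitter = PolynomialCurveFitter.create(2);
    double[] coefficients = fitter.fit(obs.toList());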