1   /*
2    * Copyright (C) 2015 Alberto Irurueta Carro (alberto@irurueta.com)
3    *
4    * Licensed under the Apache License, Version 2.0 (the "License");
5    * you may not use this file except in compliance with the License.
6    * You may obtain a copy of the License at
7    *
8    *         http://www.apache.org/licenses/LICENSE-2.0
9    *
10   * Unless required by applicable law or agreed to in writing, software
11   * distributed under the License is distributed on an "AS IS" BASIS,
12   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13   * See the License for the specific language governing permissions and
14   * limitations under the License.
15   */
16  package com.irurueta.numerical.fitting;
17  
18  import com.irurueta.algebra.AlgebraException;
19  import com.irurueta.algebra.GaussJordanElimination;
20  import com.irurueta.algebra.Matrix;
21  import com.irurueta.algebra.Utils;
22  import com.irurueta.numerical.EvaluationException;
23  import com.irurueta.numerical.NotReadyException;
24  import com.irurueta.statistics.ChiSqDist;
25  import com.irurueta.statistics.MaxIterationsExceededException;
26  
27  import java.util.Arrays;
28  
29  /**
30   * Fits provided data (x, y) to a generic non-linear function using the
31   * Levenberg-Marquardt iterative algorithm.
32   * This class is based on the implementation available in Numerical Recipes
33   * 3rd Ed., page 801.
34   */
35  @SuppressWarnings("DuplicatedCode")
36  public class LevenbergMarquardtMultiVariateFitter extends MultiVariateFitter {
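
        /*
         * Typical usage sketch (illustrative only): the evaluator, x, y and sig values
         * are assumed to be prepared by the caller as described in the constructors
         * below and in LevenbergMarquardtMultiVariateFunctionEvaluator.
         *
         * final var fitter = new LevenbergMarquardtMultiVariateFitter(evaluator, x, y, sig);
         * fitter.fit();
         * // after fitting, estimated parameters and covariance are available through
         * // the accessors inherited from the base fitter classes, and fit quality can
         * // be checked with getChisq(), getReducedChisq(), getP() and getQ()
         */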
37  
38      /**
39       * Default convergence parameter. Number of times that tolerance must be
40       * reached to consider that the algorithm has finished iterating.
41       */
42      public static final int DEFAULT_NDONE = 4;
43  
44      /**
45       * Default maximum number of iterations.
46       */
47      public static final int DEFAULT_ITMAX = 5000;
48  
49      /**
50       * Default tolerance to reach convergence.
51       */
52      public static final double DEFAULT_TOL = 1e-3;
53  
54      /**
55       * Indicates whether covariance must be adjusted or not after fitting is finished.
56       */
57      public static final boolean DEFAULT_ADJUST_COVARIANCE = true;
58  
59      /**
60       * Convergence parameter.
61       */
62      private int ndone = DEFAULT_NDONE;
63  
64      /**
65       * Maximum number of iterations.
66       */
67      private int itmax = DEFAULT_ITMAX;
68  
69      /**
70       * Tolerance to reach convergence.
71       */
72      private double tol = DEFAULT_TOL;
73  
74      /**
75       * Evaluator of functions.
76       */
77      private LevenbergMarquardtMultiVariateFunctionEvaluator evaluator;
78  
79      /**
80       * Number of function parameters to be estimated.
81       */
82      private int ma;
83  
84      /**
85       * Determines which parameters can be modified during estimation (if true)
86       * and which ones are locked (if false).
87       */
88      private boolean[] ia;
89  
90      /**
91       * Curvature matrix.
92       */
93      private Matrix alpha;
94  
95      /**
96      * Number of parameters to be fitted.
97       */
98      private int mfit = 0;
99  
100     /**
101      * An input point to be evaluated.
102      */
103     private double[] xRow;
104 
105     /**
106      * Results of function evaluations.
107      */
108     private double[] ymod;
109 
110     /**
111      * Jacobian of function at a given point.
112      */
113     Matrix jacobian;
114 
115     /**
116      * Mean square error.
117      */
118     private double mse = 0.0;
119 
120     /**
121      * Indicates whether covariance must be adjusted or not.
122      * When covariance adjustment is enabled, covariance is recomputed taking into
123      * account the input samples, their standard deviations and the Jacobians of the
124      * model function with respect to the estimated parameters, using the following
125      * expression: Cov = (J'*W*J)^-1, where:
126      * Cov is the covariance of the estimated parameters.
127      * J is a matrix containing the Jacobians of the function with respect to the
128      * estimated parameters: each row of J contains the model function Jacobian
129      * evaluated at the i-th input point x, and each column of J contains the partial
130      * derivative of the model function with respect to the j-th estimated parameter.
131      * W is the inverse of the input variances: a diagonal matrix containing the
132      * reciprocal of the input variances (squared input standard deviations), that is
133      * W = diag(w), where the k-th element of w is wk = 1 / sigmak^2 and sigmak is
134      * the standard deviation of the k-th input sample.
135      * By default, covariance is adjusted after fitting finishes.
136      */
137     private boolean adjustCovariance = DEFAULT_ADJUST_COVARIANCE;
138 
139     /**
140      * Constructor.
141      */
142     public LevenbergMarquardtMultiVariateFitter() {
143         super();
144     }
145 
146     /**
147      * Constructor.
148      *
149      * @param x   input points x where the multivariate function f(x1, x2, ...) is
150      *            evaluated.
151      * @param y   results of evaluation of the multivariate function f(x1, x2, ...)
152      *            at the provided multidimensional x points.
153      * @param sig standard deviations of each pair of points (x, y).
154      * @throws IllegalArgumentException if the number of rows of provided matrices
155      *                                  and the length of provided array do not match.
156      */
157     public LevenbergMarquardtMultiVariateFitter(final Matrix x, final Matrix y, final double[] sig) {
158         super(x, y, sig);
159     }
160 
161     /**
162      * Constructor.
163      *
164      * @param x   input points x where the multivariate function f(x1, x2, ...) is
165      *            evaluated.
166      * @param y   results of evaluation of the multivariate function f(x1, x2, ...)
167      *            at the provided multidimensional x points.
168      * @param sig standard deviation of all pairs of points, assuming that
169      *            standard deviations are constant.
170      * @throws IllegalArgumentException if provided matrices do not have the same
171      *                                  number of rows.
172      */
173     public LevenbergMarquardtMultiVariateFitter(final Matrix x, final Matrix y, final double sig) {
174         super(x, y, sig);
175     }
176 
177     /**
178      * Constructor.
179      *
180      * @param evaluator evaluator to evaluate the function at provided points and
181      *                  obtain its Jacobian with respect to the estimated parameters.
182      * @throws FittingException if evaluation fails.
183      */
184     public LevenbergMarquardtMultiVariateFitter(
185             final LevenbergMarquardtMultiVariateFunctionEvaluator evaluator) throws FittingException {
186         this();
187         setFunctionEvaluator(evaluator);
188     }
189 
190     /**
191      * Constructor.
192      *
193      * @param evaluator evaluator to evaluate the multivariate function at provided
194      *                  points and obtain its Jacobian with respect to the estimated parameters.
195      * @param x         input points x where the multivariate function f(x1, x2, ...) is
196      *                  evaluated.
197      * @param y         results of evaluation of the multivariate function f(x1, x2, ...)
198      *                  at the provided multidimensional x points.
199      * @param sig       standard deviations of each pair of points (x, y).
200      * @throws IllegalArgumentException if the number of rows of provided matrices
201      *                                  and the length of provided array do not match.
202      * @throws FittingException         if evaluation fails.
203      */
204     public LevenbergMarquardtMultiVariateFitter(
205             final LevenbergMarquardtMultiVariateFunctionEvaluator evaluator, final Matrix x, final Matrix y,
206             final double[] sig) throws FittingException {
207         this(x, y, sig);
208         setFunctionEvaluator(evaluator);
209     }
210 
211     /**
212      * Constructor.
213      *
214      * @param evaluator evaluator to evaluate the multivariate function at provided
215      *                  points and obtain its Jacobian with respect to the estimated parameters.
216      * @param x         input points x where the multivariate function f(x1, x2, ...) is
217      *                  evaluated.
218      * @param y         results of evaluation of the multivariate function f(x1, x2, ...)
219      *                  at the provided multidimensional x points.
220      * @param sig       standard deviation of all pairs of points, assuming that
221      *                  standard deviations are constant.
222      * @throws IllegalArgumentException if provided matrices do not have the same
223      *                                  number of rows.
224      * @throws FittingException         if evaluation fails.
225      */
226     public LevenbergMarquardtMultiVariateFitter(
227             final LevenbergMarquardtMultiVariateFunctionEvaluator evaluator, final Matrix x, final Matrix y,
228             final double sig) throws FittingException {
229         this(x, y, sig);
230         setFunctionEvaluator(evaluator);
231     }
232 
233     /**
234      * Returns convergence parameter.
235      *
236      * @return convergence parameter.
237      */
238     public int getNdone() {
239         return ndone;
240     }
241 
242     /**
243      * Sets convergence parameter.
244      *
245      * @param ndone convergence parameter.
246      * @throws IllegalArgumentException if provided value is less than 1.
247      */
248     public void setNdone(final int ndone) {
249         if (ndone < 1) {
250             throw new IllegalArgumentException();
251         }
252         this.ndone = ndone;
253     }
254 
255     /**
256      * Returns maximum number of iterations.
257      *
258      * @return maximum number of iterations.
259      */
260     public int getItmax() {
261         return itmax;
262     }
263 
264     /**
265      * Sets maximum number of iterations.
266      *
267      * @param itmax maximum number of iterations.
268      * @throws IllegalArgumentException if provided value is zero or negative.
269      */
270     public void setItmax(final int itmax) {
271         if (itmax <= 0) {
272             throw new IllegalArgumentException();
273         }
274         this.itmax = itmax;
275     }
276 
277     /**
278      * Returns tolerance to reach convergence.
279      *
280      * @return tolerance to reach convergence.
281      */
282     public double getTol() {
283         return tol;
284     }
285 
286     /**
287      * Sets tolerance to reach convergence.
288      *
289      * @param tol tolerance to reach convergence.
290      * @throws IllegalArgumentException if provided value is zero or negative.
291      */
292     public void setTol(final double tol) {
293         if (tol <= 0.0) {
294             throw new IllegalArgumentException();
295         }
296         this.tol = tol;
297     }
298 
299     /**
300      * Returns function evaluator to evaluate the function at a given point and
301      * obtain the function Jacobian with respect to each estimated parameter.
302      *
303      * @return function evaluator.
304      */
305     public LevenbergMarquardtMultiVariateFunctionEvaluator getFunctionEvaluator() {
306         return evaluator;
307     }
308 
309     /**
310      * Sets function evaluator to evaluate the function at a given point and obtain
311      * the function Jacobian with respect to each estimated parameter.
312      *
313      * @param evaluator function evaluator.
314      * @throws FittingException if evaluation fails.
315      */
316     public final void setFunctionEvaluator(
317             final LevenbergMarquardtMultiVariateFunctionEvaluator evaluator) throws FittingException {
318         internalSetFunctionEvaluator(evaluator);
319     }
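
        /*
         * Evaluator sketch (illustrative only): the model, dimensions and initial values
         * below are hypothetical, and the method signatures are inferred from how the
         * evaluator is used within this class (createInitialParametersArray(),
         * getNumberOfDimensions(), getNumberOfVariables() and
         * evaluate(i, point, result, params, jacobian)); check
         * LevenbergMarquardtMultiVariateFunctionEvaluator for the exact interface.
         *
         * final var evaluator = new LevenbergMarquardtMultiVariateFunctionEvaluator() {
         *     @Override
         *     public int getNumberOfDimensions() {
         *         // each input point has two components (x1, x2)
         *         return 2;
         *     }
         *
         *     @Override
         *     public int getNumberOfVariables() {
         *         // the model produces two output values (y1, y2)
         *         return 2;
         *     }
         *
         *     @Override
         *     public double[] createInitialParametersArray() {
         *         // initial guess for parameters (a, b, c, d) to be refined
         *         return new double[]{1.0, 1.0, 1.0, 0.1};
         *     }
         *
         *     @Override
         *     public void evaluate(final int i, final double[] point, final double[] result,
         *                          final double[] params, final Matrix jacobian) {
         *         // hypothetical model: y1 = a*x1 + b*x2, y2 = c*exp(d*x1)
         *         result[0] = params[0] * point[0] + params[1] * point[1];
         *         result[1] = params[2] * Math.exp(params[3] * point[0]);
         *
         *         // Jacobian: one row per output variable, one column per parameter
         *         jacobian.setElementAt(0, 0, point[0]);
         *         jacobian.setElementAt(0, 1, point[1]);
         *         jacobian.setElementAt(0, 2, 0.0);
         *         jacobian.setElementAt(0, 3, 0.0);
         *         jacobian.setElementAt(1, 0, 0.0);
         *         jacobian.setElementAt(1, 1, 0.0);
         *         jacobian.setElementAt(1, 2, Math.exp(params[3] * point[0]));
         *         jacobian.setElementAt(1, 3, params[2] * point[0] * Math.exp(params[3] * point[0]));
         *     }
         * };
         */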
320 
321     /**
322      * Indicates whether provided instance has enough data to start the function
323      * fitting.
324      *
325      * @return true if this instance is ready to start the function fitting,
326      * false otherwise.
327      */
328     @Override
329     public boolean isReady() {
330         return evaluator != null && x != null && y != null && x.getRows() == y.getRows()
331                 && x.getColumns() == evaluator.getNumberOfDimensions()
332                 && y.getColumns() == evaluator.getNumberOfVariables();
333     }
334 
335     /**
336      * Returns curvature matrix.
337      *
338      * @return curvature matrix.
339      */
340     public Matrix getAlpha() {
341         return alpha;
342     }
343 
344     /**
345      * Returns degrees of freedom of computed chi square value.
346      * The degrees of freedom are equal to the number of sampled data points minus
347      * the number of estimated parameters.
348      *
349      * @return degrees of freedom of computed chi square value.
350      */
351     public int getChisqDegreesOfFreedom() {
352         return ndat - ma;
353     }
354 
355     /**
356      * Gets reduced chi square value. This is the estimated chi square value divided
357      * by its degrees of freedom. Ideally this value should be close to 1.0, indicating
358      * an optimal fit. A value larger than 1.0 indicates a poor fit or underestimated
359      * noise, and a value smaller than 1.0 indicates overfitting or overestimated noise.
360      *
361      * @return reduced chi square value.
362      */
363     public double getReducedChisq() {
364         return getChisq() / getChisqDegreesOfFreedom();
365     }
366 
367     /**
368      * Gets mean square error produced by estimated parameters with respect to
369      * provided sample data.
370      *
371      * @return mean square error.
372      */
373     public double getMse() {
374         return mse;
375     }
376 
377     /**
378      * Gets the probability of finding a smaller chi square value.
379      * The smaller the found chi square value is, the better the fit of the estimated
380      * parameters to the actual data.
381      * Thus, the smaller the probability of finding a smaller chi square value, the
382      * better the estimated fit is.
383      *
384      * @return probability of finding a smaller chi square value (better fit), expressed
385      * as a value between 0.0 and 1.0.
386      * @throws MaxIterationsExceededException if convergence of incomplete
387      *                                        gamma function cannot be reached. This is rarely thrown and happens
388      *                                        usually for numerically unstable input values.
389      */
390     public double getP() throws MaxIterationsExceededException {
391         final var chisqDegreesOfFreedom = getChisqDegreesOfFreedom();
392         if (chisqDegreesOfFreedom <= 0) {
393             return 1.0;
394         }
395         return ChiSqDist.cdf(getChisq(), chisqDegreesOfFreedom);
396     }
397 
398     /**
399      * Gets a measure of quality of estimated fit as a value between 0.0 and 1.0.
400      * The larger the quality value is, the better the fit that has been estimated.
401      *
402      * @return measure of quality of estimated fit.
403      * @throws MaxIterationsExceededException if convergence of incomplete
404      *                                        gamma function cannot be reached. This is rarely thrown and happens
405      *                                        usually for numerically unstable input values.
406      */
407     public double getQ() throws MaxIterationsExceededException {
408         return 1.0 - getP();
409     }
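
        /*
         * Fit quality check sketch (illustrative only; thresholds are arbitrary):
         *
         * fitter.fit();
         * if (fitter.getReducedChisq() > 2.0 || fitter.getQ() < 0.05) {
         *     // reduced chi square far from 1.0 or a very small q value suggests a poor
         *     // fit or misestimated noise
         * }
         */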
410 
411     /**
412      * Indicates whether covariance must be adjusted or not.
413      * When covariance adjustment is enabled, covariance is recomputed taking into
414      * account the input samples, their standard deviations and the Jacobians of the
415      * model function with respect to the estimated parameters, using the following
416      * expression: Cov = (J'*W*J)^-1, where:
417      * Cov is the covariance of the estimated parameters.
418      * J is a matrix containing the Jacobians of the function with respect to the
419      * estimated parameters: each row of J contains the model function Jacobian
420      * evaluated at the i-th input point x, and each column of J contains the partial
421      * derivative of the model function with respect to the j-th estimated parameter.
422      * W is the inverse of the input variances: a diagonal matrix containing the
423      * reciprocal of the input variances (squared input standard deviations), that is
424      * W = diag(w), where the k-th element of w is wk = 1 / sigmak^2 and sigmak is
425      * the standard deviation of the k-th input sample.
426      * By default, covariance is adjusted after fitting finishes.
427      * <a href="http://people.duke.edu/~hpgavin/ce281/lm.pdf">http://people.duke.edu/~hpgavin/ce281/lm.pdf</a>
428      * <a href="https://www8.cs.umu.se/kurser/5DA001/HT07/lectures/lsq-handouts.pdf">https://www8.cs.umu.se/kurser/5DA001/HT07/lectures/lsq-handouts.pdf</a>
429      * Numerical Recipes 3rd Ed, page 812
430      *
431      * @return true if covariance must be adjusted, false otherwise.
432      */
433     public boolean isCovarianceAdjusted() {
434         return adjustCovariance;
435     }
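
        /*
         * Covariance adjustment sketch (illustrative only): when adjustment is disabled,
         * the reported covariance is simply the inverse of the curvature matrix, which
         * determines the covariance only up to scale.
         *
         * fitter.setCovarianceAdjusted(false);
         * fitter.fit();
         */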
436 
437     /**
438      * Specifies whether covariance must be adjusted or not.
439      * When covariance adjustment is enabled, covariance is recomputed taking into
440      * account the input samples, their standard deviations and the Jacobians of the
441      * model function with respect to the estimated parameters, using the following
442      * expression: Cov = (J'*W*J)^-1, where:
443      * Cov is the covariance of the estimated parameters.
444      * J is a matrix containing the Jacobians of the function with respect to the
445      * estimated parameters: each row of J contains the model function Jacobian
446      * evaluated at the i-th input point x, and each column of J contains the partial
447      * derivative of the model function with respect to the j-th estimated parameter.
448      * W is the inverse of the input variances: a diagonal matrix containing the
449      * reciprocal of the input variances (squared input standard deviations), that is
450      * W = diag(w), where the k-th element of w is wk = 1 / sigmak^2 and sigmak is
451      * the standard deviation of the k-th input sample.
452      * By default, covariance is adjusted after fitting finishes.
453      *
454      * @param adjustCovariance true if covariance must be adjusted, false otherwise.
455      */
456     public void setCovarianceAdjusted(final boolean adjustCovariance) {
457         this.adjustCovariance = adjustCovariance;
458     }
459 
460     /**
461      * Fits a function to provided data so that parameters associated with that
462      * function can be estimated along with their covariance matrix and chi
463      * square value.
464      * If the reduced chi square value is close to 1, the fit is usually good.
465      * If it is much larger, the model does not properly fit the data.
466      * If it is close to zero, the model over-fits the data.
467      * Methods {@link #getP()} and {@link #getQ()} can also be used to determine
468      * the quality of the fit.
469      *
470      * @throws FittingException  if fitting fails.
471      * @throws NotReadyException if enough input data has not yet been provided.
472      */
473     @Override
474     public void fit() throws FittingException, NotReadyException {
475         if (!isReady()) {
476             throw new NotReadyException();
477         }
478 
479         try {
480             resultAvailable = false;
481 
482             int j;
483             int k;
484             int l;
485             int iter;
486             int done = 0;
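                // alamda is the Levenberg-Marquardt damping factor: it is decreased when
                // a trial step reduces chi square and increased when it does not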
487             double alamda = 0.001;
488             double ochisq;
489             final var atry = new double[ma];
490             final var beta = new double[ma];
491             final var da = new double[ma];
492 
493             // number of parameters to be fitted
494             mfit = 0;
495             for (j = 0; j < ma; j++) {
496                 if (ia[j]) {
497                     mfit++;
498                 }
499             }
500 
501             final var oneda = new Matrix(mfit, 1);
502             final var temp = new Matrix(mfit, mfit);
503 
504             // initialization
505             mrqcof(a, alpha, beta);
506             System.arraycopy(a, 0, atry, 0, ma);
507 
508             ochisq = chisq;
509             for (iter = 0; iter < itmax; iter++) {
510 
511                 if (done == ndone) {
512                     // last pass. Use zero alamda
513                     alamda = 0.0;
514                 }
515 
516                 for (j = 0; j < mfit; j++) {
517                     // alter linearized fitting matrix, by augmenting diagonal
518                     // elements
519                     for (k = 0; k < mfit; k++) {
520                         covar.setElementAt(j, k, alpha.getElementAt(j, k));
521                     }
522                     covar.setElementAt(j, j, alpha.getElementAt(j, j) * (1.0 + alamda));
523                     for (k = 0; k < mfit; k++) {
524                         temp.setElementAt(j, k, covar.getElementAt(j, k));
525                     }
526                     oneda.setElementAt(j, 0, beta[j]);
527                 }
528 
529                 // matrix solution
530                 GaussJordanElimination.process(temp, oneda);
531 
532                 for (j = 0; j < mfit; j++) {
533                     for (k = 0; k < mfit; k++) {
534                         covar.setElementAt(j, k, temp.getElementAt(j, k));
535                     }
536                     da[j] = oneda.getElementAt(j, 0);
537                 }
538 
539                 if (done == ndone) {
540                     // Converged. Clean up and return
541                     covsrt(covar);
542                     covsrt(alpha);
543 
544                     if (adjustCovariance) {
545                         adjustCovariance();
546                     }
547 
548                     resultAvailable = true;
549 
550                     return;
551                 }
552 
553                 // did the trial succeed?
554                 for (j = 0, l = 0; l < ma; l++) {
555                     if (ia[l]) {
556                         atry[l] = a[l] + da[j++];
557                     }
558                 }
559 
560                 mrqcof(atry, covar, da);
561                 if (Math.abs(chisq - ochisq) < Math.max(tol, tol * chisq)) {
562                     done++;
563                 }
564 
565                 if (chisq < ochisq) {
566                     // success, accept the new solution
567                     alamda *= 0.1;
568                     ochisq = chisq;
569                     for (j = 0; j < mfit; j++) {
570                         for (k = 0; k < mfit; k++) {
571                             alpha.setElementAt(j, k, covar.getElementAt(j, k));
572                         }
573                         beta[j] = da[j];
574                     }
575                     System.arraycopy(atry, 0, a, 0, ma);
576                 } else {
577                     // failure, increase alamda
578                     alamda *= 10.0;
579                     chisq = ochisq;
580                 }
581             }
582 
583             // too many iterations
584             throw new FittingException("too many iterations");
585 
586         } catch (final AlgebraException | EvaluationException e) {
587             throw new FittingException(e);
588         }
589     }
590 
591     /**
592      * Prevents parameter at position i of the array of function parameters from
593      * being modified during function fitting.
594      *
595      * @param i   position of parameter to be retained.
596      * @param val value to be set for parameter at position i.
597      */
598     public void hold(final int i, final double val) {
599         ia[i] = false;
600         a[i] = val;
601     }
602 
603     /**
604      * Releases parameter at position i of the array of function parameters, so it
605      * can be modified again if needed.
606      *
607      * @param i position of parameter to be released.
608      */
609     public void free(final int i) {
610         ia[i] = true;
611     }
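
        /*
         * Hold/free sketch (illustrative only): fixes the parameter at position 2 to 1.0
         * during one fit and releases it afterwards.
         *
         * fitter.hold(2, 1.0);
         * fitter.fit();
         * fitter.free(2);
         */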
612 
613     /**
614      * Adjusts covariance.
615      * Covariance must be adjusted to produce realistic results at the scale of the
616      * problem; otherwise the estimated covariance is just a measure of goodness of
617      * fit similar to the chi square value, since it is the inverse of the curvature
618      * matrix, which determines the covariance only up to scale.
619      * <p>
620      * Covariance is adjusted taking into account the input samples, their standard
621      * deviations and the Jacobians of the model function with respect to the
622      * estimated parameters, using the following expression: Cov = (J'*W*J)^-1, where:
623      * Cov is the covariance of the estimated parameters.
624      * J is a matrix containing the Jacobians of the function with respect to the
625      * estimated parameters: each row of J contains the model function Jacobian
626      * evaluated at the i-th input point x, and each column of J contains the partial
627      * derivative of the model function with respect to the j-th estimated parameter.
628      * W is the inverse of the input variances: a diagonal matrix containing the
629      * reciprocal of the input variances (squared input standard deviations), that is
630      * W = diag(w), where the k-th element of w is wk = 1 / sigmak^2 and sigmak is
631      * the standard deviation of the k-th input sample.
632      *
633      * @throws AlgebraException    if there are numerical instabilities.
634      * @throws EvaluationException if function evaluation fails.
635      */
636     private void adjustCovariance() throws AlgebraException, EvaluationException {
637 
638         final var nVars = evaluator.getNumberOfVariables();
639         final var xCols = x.getColumns();
640         if (xRow == null) {
641             xRow = new double[x.getColumns()];
642         }
643         if (jacobian == null) {
644             jacobian = new Matrix(nVars, ma);
645         }
646 
647         final var jacobianTrans = new Matrix(ma, nVars);
648 
649         final var invCov = new Matrix(a.length, a.length);
650         final var tmpInvCov = new Matrix(a.length, a.length);
651         final var chiSqrDegreesOfFreedom = getChisqDegreesOfFreedom();
652         for (int i = 0; i < ndat; i++) {
653             x.getSubmatrixAsArray(i, 0, i, xCols - 1, xRow);
654 
655             evaluator.evaluate(i, xRow, ymod, a, jacobian);
656 
657             jacobian.transpose(jacobianTrans);
658 
659             jacobianTrans.multiply(jacobian, tmpInvCov);
660 
661             final var w = 1.0 / ((chiSqrDegreesOfFreedom + 1) * sig[i] * sig[i]);
662             tmpInvCov.multiplyByScalar(w);
663             invCov.add(tmpInvCov);
664         }
665 
666         covar = Utils.inverse(invCov);
667     }
668 
669     /**
670      * Internal method to set function evaluator to evaluate the function at a given
671      * point and obtain the function Jacobian with respect to each estimated parameter.
672      *
673      * @param evaluator function evaluator.
674      * @throws FittingException if evaluation fails.
675      */
676     private void internalSetFunctionEvaluator(final LevenbergMarquardtMultiVariateFunctionEvaluator evaluator)
677             throws FittingException {
678 
679         try {
680             this.evaluator = evaluator;
681 
682             if (evaluator != null) {
683                 a = evaluator.createInitialParametersArray();
684                 ma = a.length;
685                 covar = new Matrix(ma, ma);
686                 alpha = new Matrix(ma, ma);
687                 ia = new boolean[ma];
688                 Arrays.fill(ia, true);
689             }
690         } catch (final AlgebraException e) {
691             throw new FittingException(e);
692         }
693     }
694 
695     /**
696      * Used by fit() to evaluate the linearized fitting matrix alpha and vector
697      * beta, and to calculate chi square.
698      *
699      * @param a     estimated parameters so far.
700      * @param alpha curvature (i.e. fitting) matrix.
701      * @param beta  array where derivative increments for each parameter are
702      *              stored.
703      * @throws AlgebraException    if there are numerical instabilities.
704      * @throws EvaluationException if function evaluation fails.
705      */
706     private void mrqcof(final double[] a, final Matrix alpha, final double[] beta)
707             throws AlgebraException, EvaluationException {
708 
709         int i;
710         int j;
711         int k;
712         int l;
713         int m;
714         double wt;
715         double sig2i;
716         double dy;
717         final var nVars = evaluator.getNumberOfVariables();
718         if (jacobian == null) {
719             jacobian = new Matrix(nVars, ma);
720         }
721         if (xRow == null) {
722             xRow = new double[x.getColumns()];
723         }
724         final var xCols = evaluator.getNumberOfDimensions();
725         if (ymod == null) {
726             ymod = new double[nVars];
727         }
728 
729         for (j = 0; j < mfit; j++) {
730             for (k = 0; k <= j; k++) {
731                 alpha.setElementAt(j, k, 0.0);
732             }
733             beta[j] = 0.;
734         }
735 
736         chisq = 0.0;
737         mse = 0.0;
738         final var degreesOfFreedom = getChisqDegreesOfFreedom();
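            // accumulate the lower triangle of alpha (approximately J'*W*J) and beta
            // (J'*W*(y - f(x, a))) over all samples and output variables, along with the
            // chi square and mean square error terms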
739         for (i = 0; i < ndat; i++) {
740             // summation loop over all data
741             x.getSubmatrixAsArray(i, 0, i, xCols - 1, xRow);
742             evaluator.evaluate(i, xRow, ymod, a, jacobian);
743             sig2i = 1.0 / (sig[i] * sig[i]);
744             for (int n = 0; n < nVars; n++) {
745                 dy = y.getElementAt(i, n) - ymod[n];
746                 for (j = 0, l = 0; l < ma; l++) {
747                     if (ia[l]) {
748                         wt = jacobian.getElementAt(n, l) * sig2i;
749                         final var alphaBuffer = alpha.getBuffer();
750                         for (k = 0, m = 0; m < l + 1; m++) {
751                             if (ia[m]) {
752                                 final var index = alpha.getIndex(j, k++);
753                                 alphaBuffer[index] += wt * jacobian.getElementAt(n, m);
754                             }
755                         }
756                         beta[j++] += dy * wt;
757                     }
758                 }
759 
760                 // add to mse
761                 mse += dy * dy / Math.abs(degreesOfFreedom);
762 
763                 // and find chi square
764                 chisq += dy * dy * sig2i / degreesOfFreedom;
765             }
766         }
767 
768         // fill in the symmetric side
769         for (j = 1; j < mfit; j++) {
770             for (k = 0; k < j; k++) {
771                 alpha.setElementAt(k, j, alpha.getElementAt(j, k));
772             }
773         }
774     }
775 
776     /**
777      * Expands in storage the covariance matrix covar to take into account
778      * parameters that are being held fixed. (For the latter, zero covariances
779      * are returned.)
780      *
781      * @param covar covariance matrix.
782      */
783     private void covsrt(final Matrix covar) {
784         int i;
785         int j;
786         int k;
787         for (i = mfit; i < ma; i++) {
788             for (j = 0; j < i + 1; j++) {
789                 covar.setElementAt(i, j, 0.0);
790                 covar.setElementAt(j, i, 0.0);
791             }
792         }
793 
794         k = mfit - 1;
795         for (j = ma - 1; j >= 0; j--) {
796             if (ia[j]) {
797                 final var buffer = covar.getBuffer();
798                 for (i = 0; i < ma; i++) {
799                     final var pos1 = covar.getIndex(i, k);
800                     final var pos2 = covar.getIndex(i, j);
801                     swap(buffer, buffer, pos1, pos2);
802                 }
803                 for (i = 0; i < ma; i++) {
804                     final var pos1 = covar.getIndex(k, i);
805                     final var pos2 = covar.getIndex(j, i);
806                     swap(buffer, buffer, pos1, pos2);
807                 }
808 
809                 k--;
810             }
811         }
812     }
813 
814     /**
815      * Swaps values of arrays at provided positions.
816      *
817      * @param array1 1st array.
818      * @param array2 2nd array.
819      * @param pos1   1st position.
820      * @param pos2   2nd position.
821      */
822     private static void swap(final double[] array1, final double[] array2, final int pos1, final int pos2) {
823         final var value1 = array1[pos1];
824         final var value2 = array2[pos2];
825         array1[pos1] = value2;
826         array2[pos2] = value1;
827     }
828 }