View Javadoc
1   /*
2    * Copyright (C) 2015 Alberto Irurueta Carro (alberto@irurueta.com)
3    *
4    * Licensed under the Apache License, Version 2.0 (the "License");
5    * you may not use this file except in compliance with the License.
6    * You may obtain a copy of the License at
7    *
8    *         http://www.apache.org/licenses/LICENSE-2.0
9    *
10   * Unless required by applicable law or agreed to in writing, software
11   * distributed under the License is distributed on an "AS IS" BASIS,
12   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13   * See the License for the specific language governing permissions and
14   * limitations under the License.
15   */
16  package com.irurueta.numerical.fitting;
17  
18  import com.irurueta.algebra.AlgebraException;
19  import com.irurueta.algebra.GaussJordanElimination;
20  import com.irurueta.algebra.Matrix;
21  import com.irurueta.algebra.Utils;
22  import com.irurueta.numerical.EvaluationException;
23  import com.irurueta.numerical.NotReadyException;
24  import com.irurueta.statistics.ChiSqDist;
25  import com.irurueta.statistics.MaxIterationsExceededException;
26  
27  import java.util.Arrays;
28  
29  /**
30   * Fits provided data (x, y) to a generic non-linear function using
31   * Levenberg-Marquardt iterative algorithm.
32   * This class is based on the implementation available at Numerical Recipes 3rd
33   * Ed, page 801.
34   */
35  @SuppressWarnings("DuplicatedCode")
36  public class LevenbergMarquardtMultiDimensionFitter extends MultiDimensionFitter {
37  
    /**
     * Default convergence parameter. Number of times that tolerance is assumed
     * to be reached to consider that algorithm has finished iterating.
     */
    public static final int DEFAULT_NDONE = 4;

    /**
     * Default maximum number of iterations.
     */
    public static final int DEFAULT_ITMAX = 5000;

    /**
     * Default tolerance to reach convergence.
     */
    public static final double DEFAULT_TOL = 1e-3;

    /**
     * Indicates whether covariance must be adjusted or not after fitting is finished.
     */
    public static final boolean DEFAULT_ADJUST_COVARIANCE = true;

    /**
     * Convergence parameter. Iteration stops after tolerance has been reached
     * this many consecutive times.
     */
    private int ndone = DEFAULT_NDONE;

    /**
     * Maximum number of iterations.
     */
    private int itmax = DEFAULT_ITMAX;

    /**
     * Tolerance to reach convergence.
     */
    private double tol = DEFAULT_TOL;

    /**
     * Evaluator of functions. Provides the model value and its Jacobian for
     * each sample during fitting.
     */
    private LevenbergMarquardtMultiDimensionFunctionEvaluator evaluator;

    /**
     * Number of function parameters to be estimated.
     */
    private int ma;

    /**
     * Determines which parameters can be modified during estimation (if true)
     * and which ones are locked (if false).
     */
    private boolean[] ia;

    /**
     * Curvature matrix.
     */
    private Matrix alpha;

    /**
     * Number of parameters to be fitted (i.e. count of true entries in ia).
     */
    private int mfit = 0;

    /**
     * An input point to be evaluated.
     * This is internally reused for memory efficiency.
     */
    private double[] xRow;

    /**
     * Mean square error.
     */
    private double mse = 0.0;

    /**
     * Indicates whether covariance must be adjusted or not.
     * When covariance adjustment is enabled, then covariance is recomputed taking
     * into account input samples, input standard deviations of the samples and
     * jacobians of the model function over estimated parameters using the following
     * expression: Cov = (J'*W*J)^-1 where:
     * Cov is the covariance of estimated parameters
     * J is a matrix containing the Jacobians of the function over estimated parameters
     * for each input parameter x. Each row of J matrix contains an evaluation of
     * the model function Jacobian for i-th input parameter x. Each column of J matrix
     * contains the partial derivative of model function over j-th estimated parameter.
     * W is the inverse of input variances. It's a diagonal matrix containing the
     * reciprocal of the input variances (squared input standard deviations). That is:
     * W = diag(w) where k element of w is wk = 1 / sigmak^2, which corresponds to
     * the k-th standard deviation of input sample k.
     * By default, covariance is adjusted after fitting finishes.
     */
    private boolean adjustCovariance = DEFAULT_ADJUST_COVARIANCE;
129 
130     /**
131      * Constructor.
132      */
133     public LevenbergMarquardtMultiDimensionFitter() {
134         super();
135     }
136 
137     /**
138      * Constructor.
139      *
140      * @param x   input points x where function f(x1, x2, ...) is evaluated.
141      * @param y   result of evaluation of linear single dimensional function f(x)
142      *            at provided multidimensional x points.
143      * @param sig standard deviations of each pair of points (x, y).
144      * @throws IllegalArgumentException if provided arrays don't have the same
145      *                                  length.
146      */
147     public LevenbergMarquardtMultiDimensionFitter(final Matrix x, final double[] y, final double[] sig) {
148         super(x, y, sig);
149     }
150 
151     /**
152      * Constructor.
153      *
154      * @param x   input points x where function f(x1, x2, ...) is evaluated.
155      * @param y   result of evaluation of linear single dimensional function f(x)
156      *            at provided multidimensional x points.
157      * @param sig standard deviation of all pair of points assuming that
158      *            standard deviations are constant.
159      * @throws IllegalArgumentException if provided arrays don't have the same
160      *                                  length .
161      */
162     public LevenbergMarquardtMultiDimensionFitter(final Matrix x, final double[] y, final double sig) {
163         super(x, y, sig);
164     }
165 
166     /**
167      * Constructor.
168      *
169      * @param evaluator evaluator to evaluate function at provided point and
170      *                  obtain the evaluation of function basis at such point.
171      * @throws FittingException if evaluation fails.
172      */
173     public LevenbergMarquardtMultiDimensionFitter(
174             final LevenbergMarquardtMultiDimensionFunctionEvaluator evaluator) throws FittingException {
175         this();
176         setFunctionEvaluator(evaluator);
177     }
178 
179     /**
180      * Constructor.
181      *
182      * @param evaluator evaluator to evaluate function at provided point and
183      *                  obtain the evaluation of function basis at such point.
184      * @param x         input points x where function f(x1, x2, ...) is evaluated.
185      * @param y         result of evaluation of linear single dimensional function f(x)
186      *                  at provided multidimensional x points.
187      * @param sig       standard deviations of each pair of points (x, y).
188      * @throws IllegalArgumentException if provided arrays don't have the same
189      *                                  length.
190      * @throws FittingException         if evaluation fails.
191      */
192     public LevenbergMarquardtMultiDimensionFitter(
193             final LevenbergMarquardtMultiDimensionFunctionEvaluator evaluator,
194             final Matrix x, final double[] y, final double[] sig) throws FittingException {
195         this(x, y, sig);
196         setFunctionEvaluator(evaluator);
197     }
198 
199     /**
200      * Constructor.
201      *
202      * @param evaluator evaluator to evaluate function at provided point and
203      *                  obtain the evaluation of function basis at such point.
204      * @param x         input points x where function f(x1, x2, ...) is evaluated.
205      * @param y         result of evaluation of linear single dimensional function f(x)
206      *                  at provided multidimensional x points.
207      * @param sig       standard deviation of all pair of points assuming that
208      *                  standard deviations are constant.
209      * @throws IllegalArgumentException if provided arrays don't have the same
210      *                                  length .
211      * @throws FittingException         if evaluation fails.
212      */
213     public LevenbergMarquardtMultiDimensionFitter(
214             final LevenbergMarquardtMultiDimensionFunctionEvaluator evaluator,
215             final Matrix x, final double[] y, final double sig) throws FittingException {
216         this(x, y, sig);
217         setFunctionEvaluator(evaluator);
218     }
219 
220     /**
221      * Returns convergence parameter.
222      *
223      * @return convergence parameter.
224      */
225     public int getNdone() {
226         return ndone;
227     }
228 
229     /**
230      * Sets convergence parameter.
231      *
232      * @param ndone convergence parameter.
233      * @throws IllegalArgumentException if provided value is less than 1.
234      */
235     public void setNdone(final int ndone) {
236         if (ndone < 1) {
237             throw new IllegalArgumentException();
238         }
239         this.ndone = ndone;
240     }
241 
242     /**
243      * Returns maximum number of iterations.
244      *
245      * @return maximum number of iterations.
246      */
247     public int getItmax() {
248         return itmax;
249     }
250 
251     /**
252      * Sets maximum number of iterations.
253      *
254      * @param itmax maximum number of iterations.
255      * @throws IllegalArgumentException if provided value is zero or negative.
256      */
257     public void setItmax(final int itmax) {
258         if (itmax <= 0) {
259             throw new IllegalArgumentException();
260         }
261         this.itmax = itmax;
262     }
263 
264     /**
265      * Returns tolerance to reach convergence.
266      *
267      * @return tolerance to reach convergence.
268      */
269     public double getTol() {
270         return tol;
271     }
272 
273     /**
274      * Sets tolerance to reach convergence.
275      *
276      * @param tol tolerance to reach convergence.
277      * @throws IllegalArgumentException if provided value is zero or negative.
278      */
279     public void setTol(final double tol) {
280         if (tol <= 0.0) {
281             throw new IllegalArgumentException();
282         }
283         this.tol = tol;
284     }
285 
286     /**
287      * Returns function evaluator to evaluate function at a given point and
288      * obtain function derivatives respect to each provided parameter.
289      *
290      * @return function evaluator.
291      */
292     public LevenbergMarquardtMultiDimensionFunctionEvaluator getFunctionEvaluator() {
293         return evaluator;
294     }
295 
296     /**
297      * Sets function evaluator to evaluate function at a given point and obtain
298      * function derivatives respect to each provided parameter.
299      *
300      * @param evaluator function evaluator.
301      * @throws FittingException if evaluation fails.
302      */
303     public final void setFunctionEvaluator(
304             final LevenbergMarquardtMultiDimensionFunctionEvaluator evaluator) throws FittingException {
305         internalSetFunctionEvaluator(evaluator);
306     }
307 
308     /**
309      * Indicates whether provided instance has enough data to start the function
310      * fitting.
311      *
312      * @return true if this instance is ready to start the function fitting,
313      * false otherwise.
314      */
315     @Override
316     public boolean isReady() {
317         return evaluator != null && x != null && y != null && x.getRows() == y.length
318                 && x.getColumns() == evaluator.getNumberOfDimensions();
319     }
320 
321     /**
322      * Returns curvature matrix.
323      *
324      * @return curvature matrix.
325      */
326     public Matrix getAlpha() {
327         return alpha;
328     }
329 
330     /**
331      * Returns degrees of freedom of computed chi square value.
332      * Degrees of freedom is equal to the number of sampled data minus the
333      * number of estimated parameters.
334      *
335      * @return degrees of freedom of computed chi square value.
336      */
337     public int getChisqDegreesOfFreedom() {
338         return ndat - ma;
339     }
340 
341     /**
342      * Gets reduced chi square value. This is equal to estimated chi square value divided by its degrees of
343      * freedom. Ideally this value should be close to 1.0, indicating that fit is optimal.
344      * A value larger than 1.0 indicates that fit is not good or noise has been underestimated, and a value smaller than
345      * 1.0 indicates that there is overfitting or noise has been overestimated.
346      *
347      * @return chi square value.
348      */
349     public double getReducedChisq() {
350         return getChisq() / getChisqDegreesOfFreedom();
351     }
352 
353     /**
354      * Gets mean square error produced by estimated parameters respect to
355      * provided sample data.
356      *
357      * @return mean square error.
358      */
359     public double getMse() {
360         return mse;
361     }
362 
363     /**
364      * Gets the probability of finding a smaller chi square value.
365      * The smaller the found chi square value is, the better the fit of the estimated
366      * parameters to the actual parameter.
367      * Thus, the smaller the chance of finding a smaller chi square value, then the
368      * better the estimated fit is.
369      *
370      * @return probability of finding a smaller chi square value (better fit), expressed
371      * as a value between 0.0 and 1.0.
372      * @throws MaxIterationsExceededException if convergence of incomplete
373      *                                        gamma function cannot be reached. This is rarely thrown and happens
374      *                                        usually for numerically unstable input values.
375      */
376     public double getP() throws MaxIterationsExceededException {
377         final var chisqDegreesOfFreedom = getChisqDegreesOfFreedom();
378         if (chisqDegreesOfFreedom <= 0) {
379             return 1.0;
380         }
381         return ChiSqDist.cdf(getChisq(), getChisqDegreesOfFreedom());
382     }
383 
384     /**
385      * Gets a measure of quality of estimated fit as a value between 0.0 and 1.0.
386      * The larger the quality value is, the better the fit that has been estimated.
387      *
388      * @return measure of quality of estimated fit.
389      * @throws MaxIterationsExceededException if convergence of incomplete
390      *                                        gamma function cannot be reached. This is rarely thrown and happens
391      *                                        usually for numerically unstable input values.
392      */
393     public double getQ() throws MaxIterationsExceededException {
394         return 1.0 - getP();
395     }
396 
397     /**
398      * Indicates whether covariance must be adjusted or not.
399      * When covariance adjustment is enabled, then covariance is recomputed taking
400      * into account input samples, input standard deviations of the samples and
401      * jacobians of the model function overestimated parameters using the following
402      * expression: Cov = (J'*W*J)^-1 where:
403      * Cov is the covariance of estimated parameters
404      * J is a matrix containing the Jacobians of the function overestimated parameters
405      * for each input parameter x. Each row of J matrix contains an evaluation of
406      * the model function Jacobian for i-th input parameter x. Each column of J matrix
407      * contains the partial derivative of model function over j-th estimated parameter.
408      * W is the inverse of input variances. It's a diagonal matrix containing the
409      * reciprocal of the input variances (squared input standard deviations). That is:
410      * W = diag(w) where k element of w is wk = 1 / sigmak^2, which corresponds to
411      * the k-th standard deviation of input sample k.
412      * By default, covariance is adjusted after fitting finishes.
413      * <a href="http://people.duke.edu/~hpgavin/ce281/lm.pdf">http://people.duke.edu/~hpgavin/ce281/lm.pdf</a>
414      * <a href="https://www8.cs.umu.se/kurser/5DA001/HT07/lectures/lsq-handouts.pdf">https://www8.cs.umu.se/kurser/5DA001/HT07/lectures/lsq-handouts.pdf</a>
415      * Numerical Recipes 3rd Ed, page 812
416      *
417      * @return true if covariance must be adjusted, false otherwise.
418      */
419     public boolean isCovarianceAdjusted() {
420         return adjustCovariance;
421     }
422 
423     /**
424      * Specifies whether covariance must be adjusted or not.
425      * When covariance adjustment is enabled, then covariance is recomputed taking
426      * into account input samples, input standard deviations of the samples and
427      * jacobians of the model function overestimated parameters using the following
428      * expression: Cov = (J'*W*J)^-1 where:
429      * Cov is the covariance of estimated parameters
430      * J is a matrix containing the Jacobians of the function overestimated parameters
431      * for each input parameter x. Each row of J matrix contains an evaluation of
432      * the model function Jacobian for i-th input parameter x. Each column of J matrix
433      * contains the partial derivative of model function over j-th estimated parameter.
434      * W is the inverse of input variances. It's a diagonal matrix containing the
435      * reciprocal of the input variances (squared input standard deviations). That is:
436      * W = diag(w) where k element of w is wk = 1 / sigmak^2, which corresponds to
437      * the k-th standard deviation of input sample k.
438      * By default, covariance is adjusted after fitting finishes.
439      *
440      * @param adjustCovariance true if covariance must be adjusted, false otherwise.
441      */
442     public void setCovarianceAdjusted(final boolean adjustCovariance) {
443         this.adjustCovariance = adjustCovariance;
444     }
445 
    /**
     * Fits a function to provided data so that parameters associated to that
     * function can be estimated along with their covariance matrix and chi
     * square value.
     * This follows the Levenberg-Marquardt scheme of Numerical Recipes 3rd Ed
     * (mrqmin, page 801): the damping factor alamda blends between
     * gradient-descent (large alamda) and Gauss-Newton (small alamda) steps.
     * If chi square value is close to 1, the fit is usually good.
     * If it is much larger, then error cannot be properly fitted.
     * If it is close to zero, then the model over-fits the error.
     * Methods {@link #getP()} and {@link #getQ()} can also be used to determine
     * the quality of the fit.
     *
     * @throws FittingException  if fitting fails.
     * @throws NotReadyException if enough input data has not yet been provided.
     */
    @Override
    public void fit() throws FittingException, NotReadyException {
        if (!isReady()) {
            throw new NotReadyException();
        }

        try {
            resultAvailable = false;

            int j;
            int k;
            int l;
            int iter;
            // counts consecutive iterations where chi square change fell below tolerance
            int done = 0;
            // initial damping factor
            var alamda = 0.001;
            // chi square of the last accepted solution
            double ochisq;
            // trial parameter vector
            final var atry = new double[ma];
            final var beta = new double[ma];
            // parameter increments solved at each iteration
            final var da = new double[ma];

            // number of parameters to be fitted
            mfit = 0;
            for (j = 0; j < ma; j++) {
                if (ia[j]) {
                    mfit++;
                }
            }

            final var oneda = new Matrix(mfit, 1);
            final var temp = new Matrix(mfit, mfit);

            // initialization: compute curvature matrix alpha, gradient beta and
            // chi square at the initial parameters
            mrqcof(a, alpha, beta);
            System.arraycopy(a, 0, atry, 0, ma);

            ochisq = chisq;
            for (iter = 0; iter < itmax; iter++) {

                if (done == ndone) {
                    // last pass. Use zero alamda
                    alamda = 0.0;
                }

                for (j = 0; j < mfit; j++) {
                    // alter linearized fitting matrix, by augmenting diagonal
                    // elements
                    for (k = 0; k < mfit; k++) {
                        covar.setElementAt(j, k, alpha.getElementAt(j, k));
                    }
                    covar.setElementAt(j, j, alpha.getElementAt(j, j) * (1.0 + alamda));
                    for (k = 0; k < mfit; k++) {
                        temp.setElementAt(j, k, covar.getElementAt(j, k));
                    }
                    oneda.setElementAt(j, 0, beta[j]);
                }

                // matrix solution: solves temp * da = beta in place
                // (temp becomes its inverse, oneda the solution)
                GaussJordanElimination.process(temp, oneda);

                for (j = 0; j < mfit; j++) {
                    for (k = 0; k < mfit; k++) {
                        covar.setElementAt(j, k, temp.getElementAt(j, k));
                    }
                    da[j] = oneda.getElementAt(j, 0);
                }

                if (done == ndone) {
                    // Converged. Clean up and return
                    // (expand covariance/curvature to full parameter ordering,
                    // zeroing rows/columns of held parameters)
                    covsrt(covar);
                    covsrt(alpha);

                    if (adjustCovariance) {
                        adjustCovariance();
                    }

                    resultAvailable = true;

                    return;
                }

                // did the trial succeed?
                // build trial parameters, applying increments only to free ones
                for (j = 0, l = 0; l < ma; l++) {
                    if (ia[l]) {
                        atry[l] = a[l] + da[j++];
                    }
                }

                // evaluate trial: covar and da are reused as scratch for the
                // trial's curvature matrix and gradient
                mrqcof(atry, covar, da);
                if (Math.abs(chisq - ochisq) < Math.max(tol, tol * chisq)) {
                    done++;
                }

                if (chisq < ochisq) {
                    // success, accept the new solution
                    alamda *= 0.1;
                    ochisq = chisq;
                    for (j = 0; j < mfit; j++) {
                        for (k = 0; k < mfit; k++) {
                            alpha.setElementAt(j, k, covar.getElementAt(j, k));
                        }
                        beta[j] = da[j];
                    }
                    System.arraycopy(atry, 0, a, 0, ma);
                } else {
                    // failure, increase alamda
                    alamda *= 10.0;
                    chisq = ochisq;
                }
            }

            // too many iterations
            throw new FittingException("too many iterations");

        } catch (final AlgebraException | EvaluationException e) {
            throw new FittingException(e);
        }
    }
576 
577     /**
578      * Prevents parameter at position i of linear combination of basis functions
579      * to be modified during function fitting.
580      *
581      * @param i   position of parameter to be retained.
582      * @param val value to be set for parameter at position i.
583      */
584     public void hold(final int i, final double val) {
585         ia[i] = false;
586         a[i] = val;
587     }
588 
589     /**
590      * Releases parameter at position i of linear combination of basis functions,
591      * so it can be modified again if needed.
592      *
593      * @param i position of parameter to be released.
594      */
595     public void free(final int i) {
596         ia[i] = true;
597     }
598 
    /**
     * Adjusts covariance.
     * Covariance must be adjusted to produce more real results close to the scale
     * of problem, otherwise estimated covariance will just be a measure of
     * goodness similar to chi square value because it will be the inverse of
     * the curvature matrix, which is just a solution of the covariance up to scale.
     * <p>
     * Covariance is adjusted taking into account input samples, input standard
     * deviations of the samples and jacobians of the model function over estimated
     * parameters using the following expression: Cov = (J'*W*J)^-1 where:
     * Cov is the covariance of estimated parameters
     * J is a matrix containing the Jacobians of the function over estimated parameters
     * for each input parameter x. Each row of J matrix contains an evaluation of
     * the model function Jacobian for i-th input parameter x. Each column of J matrix
     * contains the partial derivative of model function over j-th estimated parameter.
     * W is the inverse of input variances. It's a diagonal matrix containing the
     * reciprocal of the input variances (squared input standard deviations). That is:
     * W = diag(w) where k element of w is wk = 1 / sigmak^2, which corresponds to
     * the k-th standard deviation of input sample k.
     *
     * @throws AlgebraException    if there are numerical instabilities.
     * @throws EvaluationException if function evaluation fails.
     */
    private void adjustCovariance() throws AlgebraException, EvaluationException {

        final var xCols = x.getColumns();
        if (xRow == null) {
            // lazily allocate reusable row buffer
            xRow = new double[x.getColumns()];
        }

        // invCov accumulates J'*W*J one rank-1 term at a time
        final var invCov = new Matrix(a.length, a.length);
        final var tmp1 = new Matrix(a.length, 1);
        final var tmp2 = new Matrix(1, a.length);
        final var tmpInvCov = new Matrix(a.length, a.length);
        final var derivatives = new double[a.length];
        final var chiSqrDegreesOfFreedom = getChisqDegreesOfFreedom();
        for (var i = 0; i < ndat; i++) {
            // copy i-th input point into xRow
            x.getSubmatrixAsArray(i, 0, i, xCols - 1, xRow);

            // evaluate model to obtain Jacobian (derivatives) at this point;
            // the function value itself is not needed here
            evaluator.evaluate(i, xRow, a, derivatives);

            // outer product derivatives * derivatives' -> tmpInvCov
            tmp1.fromArray(derivatives);
            tmp2.fromArray(derivatives);

            tmp1.multiply(tmp2, tmpInvCov);

            // weight by reciprocal variance; the (degreesOfFreedom + 1) factor
            // appears to rescale weights by sample count relative to parameters
            // — NOTE(review): not part of the plain Cov = (J'*W*J)^-1 formula,
            // presumably an intentional normalization; confirm against tests
            final var w = 1.0 / ((chiSqrDegreesOfFreedom + 1) * sig[i] * sig[i]);
            tmpInvCov.multiplyByScalar(w);
            invCov.add(tmpInvCov);
        }

        // covariance is the inverse of the accumulated weighted normal matrix
        covar = Utils.inverse(invCov);
    }
652 
653     /**
654      * Internal method to set function evaluator to evaluate function at a given
655      * point and obtain function derivatives respect to each provided parameter.
656      *
657      * @param evaluator function evaluator.
658      * @throws FittingException if evaluation fails.
659      */
660     private void internalSetFunctionEvaluator(final LevenbergMarquardtMultiDimensionFunctionEvaluator evaluator)
661             throws FittingException {
662 
663         try {
664             this.evaluator = evaluator;
665 
666             if (evaluator != null) {
667                 a = evaluator.createInitialParametersArray();
668                 ma = a.length;
669                 covar = new Matrix(ma, ma);
670                 alpha = new Matrix(ma, ma);
671                 ia = new boolean[ma];
672                 Arrays.fill(ia, true);
673             }
674         } catch (final AlgebraException e) {
675             throw new FittingException(e);
676         }
677     }
678 
    /**
     * Used by fit to evaluate the linearized fitting matrix alpha, and vector
     * beta to calculate chi square.
     * Based on Numerical Recipes 3rd Ed mrqcof (page 801), with the addition
     * that chi square and mse are normalized by the degrees of freedom.
     * Only the mfit x mfit leading portion of alpha (indices of free
     * parameters, packed together) is filled.
     *
     * @param a     estimated parameters so far.
     * @param alpha curvature (i.e. fitting) matrix.
     * @param beta  array where derivative increments for each parameter are
     *              stored.
     * @throws AlgebraException    if there are numerical instabilities.
     * @throws EvaluationException if function evaluation fails.
     */
    private void mrqcof(final double[] a, final Matrix alpha, final double[] beta)
            throws AlgebraException, EvaluationException {

        int i;
        int j;
        int k;
        int l;
        int m;
        // model value at current point
        double ymod;
        // weighted derivative
        double wt;
        // reciprocal variance of current sample
        double sig2i;
        // residual y[i] - ymod
        double dy;
        // Jacobian of the model at the current point
        final var dyda = new double[ma];

        if (xRow == null) {
            // lazily allocate reusable row buffer
            xRow = new double[x.getColumns()];
        }
        final var xCols = evaluator.getNumberOfDimensions();

        // initialize (symmetric) alpha, beta
        for (j = 0; j < mfit; j++) {
            for (k = 0; k <= j; k++) {
                alpha.setElementAt(j, k, 0.0);
            }
            beta[j] = 0.;
        }

        chisq = 0.;
        mse = 0.0;
        final var degreesOfFreedom = getChisqDegreesOfFreedom();
        for (i = 0; i < ndat; i++) {
            // summation loop over all data
            x.getSubmatrixAsArray(i, 0, i, xCols - 1,
                    xRow);
            ymod = evaluator.evaluate(i, xRow, a, dyda);

            sig2i = 1.0 / (sig[i] * sig[i]);
            dy = y[i] - ymod;
            // accumulate lower triangle of alpha and beta over free parameters
            // only; j/k are packed indices skipping held (ia == false) entries
            for (j = 0, l = 0; l < ma; l++) {
                if (ia[l]) {
                    wt = dyda[l] * sig2i;
                    final var alphaBuffer = alpha.getBuffer();
                    for (k = 0, m = 0; m < l + 1; m++) {
                        if (ia[m]) {
                            // direct buffer access avoids a get+set per element
                            final int index = alpha.getIndex(j, k++);
                            alphaBuffer[index] += wt * dyda[m];
                        }
                    }
                    beta[j++] += dy * wt;
                }
            }

            // add to mse
            mse += dy * dy / Math.abs(degreesOfFreedom);

            // and find chi square
            chisq += dy * dy * sig2i / degreesOfFreedom;
        }

        // fill in the symmetric side
        for (j = 1; j < mfit; j++) {
            for (k = 0; k < j; k++) {
                alpha.setElementAt(k, j, alpha.getElementAt(j, k));
            }
        }
    }
756 
    /**
     * Expand in storage the covariance matrix covar, to take into account
     * parameters that are being held fixed. (For the latter, return zero
     * covariances).
     * Based on Numerical Recipes 3rd Ed covsrt (page 803): entries computed in
     * the packed mfit x mfit leading block are moved to the rows/columns of
     * their original parameter positions.
     *
     * @param covar covariance matrix.
     */
    private void covsrt(final Matrix covar) {
        int i;
        int j;
        int k;
        // zero out rows/columns beyond the fitted block (held parameters)
        for (i = mfit; i < ma; i++) {
            for (j = 0; j < i + 1; j++) {
                covar.setElementAt(i, j, 0.0);
                covar.setElementAt(j, i, 0.0);
            }
        }

        // walk free parameters from the end, swapping packed column/row k into
        // the parameter's original position j
        k = mfit - 1;
        for (j = ma - 1; j >= 0; j--) {
            if (ia[j]) {
                final var buffer = covar.getBuffer();
                // swap columns k and j
                for (i = 0; i < ma; i++) {
                    final var pos1 = covar.getIndex(i, k);
                    final var pos2 = covar.getIndex(i, j);
                    swap(buffer, buffer, pos1, pos2);
                }
                // swap rows k and j
                for (i = 0; i < ma; i++) {
                    final var pos1 = covar.getIndex(k, i);
                    final var pos2 = covar.getIndex(j, i);
                    swap(buffer, buffer, pos1, pos2);
                }

                k--;
            }
        }
    }
794 
795     /**
796      * Swaps values of arrays at provided positions.
797      *
798      * @param array1 1st array.
799      * @param array2 2nd array.
800      * @param pos1   1st position.
801      * @param pos2   2nd position.
802      */
803     private static void swap(final double[] array1, final double[] array2, final int pos1, final int pos2) {
804         final var value1 = array1[pos1];
805         final var value2 = array2[pos2];
806         array1[pos1] = value2;
807         array2[pos2] = value1;
808     }
809 }