1   /*
2    * Copyright (C) 2015 Alberto Irurueta Carro (alberto@irurueta.com)
3    *
4    * Licensed under the Apache License, Version 2.0 (the "License");
5    * you may not use this file except in compliance with the License.
6    * You may obtain a copy of the License at
7    *
8    *         http://www.apache.org/licenses/LICENSE-2.0
9    *
10   * Unless required by applicable law or agreed to in writing, software
11   * distributed under the License is distributed on an "AS IS" BASIS,
12   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13   * See the License for the specific language governing permissions and
14   * limitations under the License.
15   */
16  package com.irurueta.numerical.fitting;
17  
18  import com.irurueta.algebra.AlgebraException;
19  import com.irurueta.algebra.GaussJordanElimination;
20  import com.irurueta.algebra.Matrix;
21  import com.irurueta.algebra.Utils;
22  import com.irurueta.numerical.EvaluationException;
23  import com.irurueta.numerical.NotReadyException;
24  import com.irurueta.statistics.ChiSqDist;
25  import com.irurueta.statistics.MaxIterationsExceededException;
26  
27  import java.util.Arrays;
28  
29  /**
30   * Fits provided data (x,y) to a generic non-linear function using
31   * the Levenberg-Marquardt iterative algorithm.
32   * This class is based on the implementation available at Numerical Recipes 3rd
33   * Ed, page 801.
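 * <p>
 * A minimal usage sketch is shown below. The Gaussian model, its hand-computed
 * derivatives and the sample arrays x, y and sig are illustrative assumptions, not
 * part of this library; only the two evaluator methods used by this class are sketched.
 * <pre>{@code
 * final LevenbergMarquardtSingleDimensionFunctionEvaluator evaluator =
 *         new LevenbergMarquardtSingleDimensionFunctionEvaluator() {
 *
 *     public double[] createInitialParametersArray() {
 *         // rough initial guess for [amplitude, mean, standard deviation]
 *         return new double[]{1.0, 0.0, 1.0};
 *     }
 *
 *     public double evaluate(final int i, final double point, final double[] params,
 *                            final double[] derivatives) {
 *         final double amp = params[0];
 *         final double mean = params[1];
 *         final double std = params[2];
 *         final double arg = (point - mean) / std;
 *         final double ex = Math.exp(-0.5 * arg * arg);
 *         // partial derivatives of the model with respect to each parameter
 *         derivatives[0] = ex;
 *         derivatives[1] = amp * ex * arg / std;
 *         derivatives[2] = amp * ex * arg * arg / std;
 *         return amp * ex;
 *     }
 * };
 *
 * final LevenbergMarquardtSingleDimensionFitter fitter =
 *         new LevenbergMarquardtSingleDimensionFitter(evaluator, x, y, sig);
 * fitter.fit();
 * // estimated parameters, their covariance and the chi square of the fit are then
 * // available through the accessors inherited from the base fitter class
 * }</pre>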
34   */
35  @SuppressWarnings("DuplicatedCode")
36  public class LevenbergMarquardtSingleDimensionFitter extends SingleDimensionFitter {
37  
38      /**
39       * Default convergence parameter. Number of times that the tolerance must be
40       * reached to consider that the algorithm has finished iterating.
41       */
42      public static final int DEFAULT_NDONE = 4;
43  
44      /**
45       * Default maximum number of iterations.
46       */
47      public static final int DEFAULT_ITMAX = 5000;
48  
49      /**
50       * Default tolerance to reach convergence.
51       */
52      public static final double DEFAULT_TOL = 1e-3;
53  
54      /**
55       * Indicates whether covariance must be adjusted or not after fitting is finished.
56       */
57      public static final boolean DEFAULT_ADJUST_COVARIANCE = true;
58  
59      /**
60       * Convergence parameter.
61       */
62      private int ndone = DEFAULT_NDONE;
63  
64      /**
65       * Maximum number of iterations.
66       */
67      private int itmax = DEFAULT_ITMAX;
68  
69      /**
70       * Tolerance to reach convergence.
71       */
72      private double tol = DEFAULT_TOL;
73  
74      /**
75       * Evaluator of functions.
76       */
77      private LevenbergMarquardtSingleDimensionFunctionEvaluator evaluator;
78  
79      /**
80       * Number of function parameters to be estimated.
81       */
82      private int ma;
83  
84      /**
85       * Determines which parameters can be modified during estimation (if true)
86       * and which ones are locked (if false).
87       */
88      private boolean[] ia;
89  
90      /**
91       * Curvature matrix.
92       */
93      private Matrix alpha;
94  
95      /**
96       * Number of parameters to be fitted.
97       */
98      private int mfit = 0;
99  
100     /**
101      * Mean square error.
102      */
103     private double mse = 0.0;
104 
105     /**
106      * Indicates whether covariance must be adjusted or not.
107      * When covariance adjustment is enabled, covariance is recomputed taking
108      * into account the input samples, their standard deviations and the Jacobians
109      * of the model function with respect to the estimated parameters, using the
110      * expression: Cov = (J'*W*J)^-1, where:
111      * Cov is the covariance of the estimated parameters.
112      * J is a matrix containing the Jacobians of the function with respect to the
113      * estimated parameters evaluated at each input point x. Each row of J contains
114      * the Jacobian evaluated at the i-th input point x, and each column of J contains
115      * the partial derivative of the model function with respect to the j-th estimated parameter.
116      * W is the inverse of the input variances. It is a diagonal matrix containing the
117      * reciprocals of the input variances (squared input standard deviations). That is:
118      * W = diag(w), where the k-th element of w is wk = 1 / sigmak^2 and sigmak is the
119      * standard deviation of the k-th input sample.
120      * By default, covariance is adjusted after fitting finishes.
121      */
122     private boolean adjustCovariance = DEFAULT_ADJUST_COVARIANCE;
123 
124     /**
125      * Constructor.
126      */
127     public LevenbergMarquardtSingleDimensionFitter() {
128         super();
129     }
130 
131     /**
132      * Constructor.
133      *
134      * @param x   input points x where function f(x) is evaluated.
135      * @param y   result of evaluation of a non-linear single dimensional function f(x)
136      *            at provided x points.
137      * @param sig standard deviations of each pair of points (x, y).
138      * @throws IllegalArgumentException if provided arrays don't have the same
139      *                                  length.
140      */
141     public LevenbergMarquardtSingleDimensionFitter(final double[] x, final double[] y, final double[] sig) {
142         super(x, y, sig);
143     }
144 
145     /**
146      * Constructor.
147      *
148      * @param x   input points x where function f(x) is evaluated.
149      * @param y   result of evaluation of a non-linear single dimensional function f(x)
150      *            at provided x points.
151      * @param sig standard deviation of all pairs of points, assuming that
152      *            the standard deviation is constant.
153      * @throws IllegalArgumentException if provided arrays don't have the same
154      *                                  length.
155      */
156     public LevenbergMarquardtSingleDimensionFitter(final double[] x, final double[] y, final double sig) {
157         super(x, y, sig);
158     }
159 
160     /**
161      * Constructor.
162      *
163      * @param evaluator evaluator to evaluate function at provided point and
164      *                  obtain its derivatives with respect to the estimated parameters at such point.
165      * @throws FittingException if evaluation fails.
166      */
167     public LevenbergMarquardtSingleDimensionFitter(
168             final LevenbergMarquardtSingleDimensionFunctionEvaluator evaluator) throws FittingException {
169         this();
170         setFunctionEvaluator(evaluator);
171     }
172 
173     /**
174      * Constructor.
175      *
176      * @param evaluator evaluator to evaluate function at provided point and
177      *                  obtain its derivatives with respect to the estimated parameters at such point.
178      * @param x         input points x where function f(x) is evaluated.
179      * @param y         result of evaluation of a non-linear single dimensional function f(x)
180      *                  at provided x points.
181      * @param sig       standard deviations of each pair of points (x, y).
182      * @throws IllegalArgumentException if provided arrays don't have the same
183      *                                  length.
184      * @throws FittingException         if evaluation fails.
185      */
186     public LevenbergMarquardtSingleDimensionFitter(
187             final LevenbergMarquardtSingleDimensionFunctionEvaluator evaluator, final double[] x, final double[] y,
188             final double[] sig) throws FittingException {
189         this(x, y, sig);
190         setFunctionEvaluator(evaluator);
191     }
192 
193     /**
194      * Constructor.
195      *
196      * @param evaluator evaluator to evaluate function at provided point and
197      *                  obtain its derivatives with respect to the estimated parameters at such point.
198      * @param x         input points x where function f(x) is evaluated.
199      * @param y         result of evaluation of a non-linear single dimensional function f(x)
200      *                  at provided x points.
201      * @param sig       standard deviation of all pairs of points, assuming that
202      *                  the standard deviation is constant.
203      * @throws IllegalArgumentException if provided arrays don't have the same
204      *                                  length.
205      * @throws FittingException         if evaluation fails.
206      */
207     public LevenbergMarquardtSingleDimensionFitter(
208             final LevenbergMarquardtSingleDimensionFunctionEvaluator evaluator, final double[] x, final double[] y,
209             final double sig) throws FittingException {
210         this(x, y, sig);
211         setFunctionEvaluator(evaluator);
212     }
213 
214     /**
215      * Returns convergence parameter.
216      *
217      * @return convergence parameter.
218      */
219     public int getNdone() {
220         return ndone;
221     }
222 
223     /**
224      * Sets convergence parameter.
225      *
226      * @param ndone convergence parameter.
227      * @throws IllegalArgumentException if provided value is less than 1.
228      */
229     public void setNdone(final int ndone) {
230         if (ndone < 1) {
231             throw new IllegalArgumentException();
232         }
233         this.ndone = ndone;
234     }
235 
236     /**
237      * Returns maximum number of iterations.
238      *
239      * @return maximum number of iterations.
240      */
241     public int getItmax() {
242         return itmax;
243     }
244 
245     /**
246      * Sets maximum number of iterations.
247      *
248      * @param itmax maximum number of iterations.
249      * @throws IllegalArgumentException if provided value is zero or negative.
250      */
251     public void setItmax(final int itmax) {
252         if (itmax <= 0) {
253             throw new IllegalArgumentException();
254         }
255         this.itmax = itmax;
256     }
257 
258     /**
259      * Returns tolerance to reach convergence.
260      *
261      * @return tolerance to reach convergence.
262      */
263     public double getTol() {
264         return tol;
265     }
266 
267     /**
268      * Sets tolerance to reach convergence.
269      *
270      * @param tol tolerance to reach convergence.
271      * @throws IllegalArgumentException if provided value is zero or negative.
272      */
273     public void setTol(final double tol) {
274         if (tol <= 0.0) {
275             throw new IllegalArgumentException();
276         }
277         this.tol = tol;
278     }
279 
280     /**
281      * Returns function evaluator to evaluate function at a given point and
282      * obtain function derivatives with respect to each provided parameter.
283      *
284      * @return function evaluator.
285      */
286     public LevenbergMarquardtSingleDimensionFunctionEvaluator getFunctionEvaluator() {
287         return evaluator;
288     }
289 
290     /**
291      * Sets function evaluator to evaluate function at a given point and obtain
292      * function derivatives with respect to each provided parameter.
293      *
294      * @param evaluator function evaluator.
295      * @throws FittingException if evaluation fails.
296      */
297     public final void setFunctionEvaluator(
298             final LevenbergMarquardtSingleDimensionFunctionEvaluator evaluator) throws FittingException {
299         internalSetFunctionEvaluator(evaluator);
300     }
301 
302     /**
303      * Indicates whether provided instance has enough data to start the function
304      * fitting.
305      *
306      * @return true if this instance is ready to start the function fitting,
307      * false otherwise
308      */
309     @Override
310     public boolean isReady() {
311         return evaluator != null && x != null && y != null && x.length == y.length;
312     }
313 
314     /**
315      * Returns curvature matrix.
316      * The curvature matrix is an indication of the curvature of the error of the
317      * fitted function along each parameter dimension.
318      * The larger the curvatures are, the more likely it is that parameters can be
319      * correctly fitted, because a deep enough valley has been found to converge to
320      * an optimal solution.
321      * Typically, curvature is proportional to the inverse of the covariance matrix.
322      *
323      * @return curvature matrix.
324      */
325     public Matrix getAlpha() {
326         return alpha;
327     }
328 
329     /**
330      * Returns degrees of freedom of computed chi square value.
331      * Degrees of freedom are equal to the number of data samples minus the
332      * number of estimated parameters.
333      *
334      * @return degrees of freedom of computed chi square value.
335      */
336     public int getChisqDegreesOfFreedom() {
337         return ndat - ma;
338     }
339 
340     /**
341      * Gets the mean square error produced by the estimated parameters with
342      * respect to the provided sample data.
343      *
344      * @return mean square error.
345      */
346     public double getMse() {
347         return mse;
348     }
349 
350     /**
351      * Gets the probability of finding a smaller chi square value.
352      * The smaller the found chi square value is, the better the estimated
353      * parameters fit the actual data.
354      * Thus, the smaller the probability of finding a smaller chi square value,
355      * the better the estimated fit is.
356      *
357      * @return probability of finding a smaller chi square value (better fit), expressed
358      * as a value between 0.0 and 1.0.
359      * @throws MaxIterationsExceededException if convergence of incomplete
360      *                                        gamma function cannot be reached. This is rarely thrown and happens
361      *                                        usually for numerically unstable input values.
362      */
363     public double getP() throws MaxIterationsExceededException {
364         return ChiSqDist.cdf(getChisq(), getChisqDegreesOfFreedom());
365     }
366 
367     /**
368      * Gets a measure of quality of estimated fit as a value between 0.0 and 1.0.
369      * The larger the quality value is, the better the fit that has been estimated.
370      *
371      * @return measure of quality of estimated fit.
372      * @throws MaxIterationsExceededException if convergence of incomplete
373      *                                        gamma function cannot be reached. This is rarely thrown and happens
374      *                                        usually for numerically unstable input values.
375      */
376     public double getQ() throws MaxIterationsExceededException {
377         return 1.0 - getP();
378     }
379 
380     /**
381      * Indicates whether covariance must be adjusted or not.
382      * When covariance adjustment is enabled, covariance is recomputed taking
383      * into account the input samples, their standard deviations and the Jacobians
384      * of the model function with respect to the estimated parameters, using the
385      * expression: Cov = (J'*W*J)^-1, where:
386      * Cov is the covariance of the estimated parameters.
387      * J is a matrix containing the Jacobians of the function with respect to the
388      * estimated parameters evaluated at each input point x. Each row of J contains
389      * the Jacobian evaluated at the i-th input point x, and each column of J contains
390      * the partial derivative of the model function with respect to the j-th estimated parameter.
391      * W is the inverse of the input variances. It is a diagonal matrix containing the
392      * reciprocals of the input variances (squared input standard deviations). That is:
393      * W = diag(w), where the k-th element of w is wk = 1 / sigmak^2 and sigmak is the
394      * standard deviation of the k-th input sample.
395      * By default, covariance is adjusted after fitting finishes.
396      * More info about confidence of estimated parameters can be found here:
397      * <a href="http://people.duke.edu/~hpgavin/ce281/lm.pdf">http://people.duke.edu/~hpgavin/ce281/lm.pdf</a>
398      * <a href="https://www8.cs.umu.se/kurser/5DA001/HT07/lectures/lsq-handouts.pdf">https://www8.cs.umu.se/kurser/5DA001/HT07/lectures/lsq-handouts.pdf</a>
399      * Numerical Recipes 3rd Ed, page 812
400      *
401      * @return true if covariance must be adjusted, false otherwise.
402      */
403     public boolean isCovarianceAdjusted() {
404         return adjustCovariance;
405     }
406 
407     /**
408      * Specifies whether covariance must be adjusted or not.
409      * When covariance adjustment is enabled, covariance is recomputed taking
410      * into account the input samples, their standard deviations and the Jacobians
411      * of the model function with respect to the estimated parameters, using the
412      * expression: Cov = (J'*W*J)^-1, where:
413      * Cov is the covariance of the estimated parameters.
414      * J is a matrix containing the Jacobians of the function with respect to the
415      * estimated parameters evaluated at each input point x. Each row of J contains
416      * the Jacobian evaluated at the i-th input point x, and each column of J contains
417      * the partial derivative of the model function with respect to the j-th estimated parameter.
418      * W is the inverse of the input variances. It is a diagonal matrix containing the
419      * reciprocals of the input variances (squared input standard deviations). That is:
420      * W = diag(w), where the k-th element of w is wk = 1 / sigmak^2 and sigmak is the
421      * standard deviation of the k-th input sample.
422      * By default, covariance is adjusted after fitting finishes.
423      *
424      * @param adjustCovariance true if covariance must be adjusted, false otherwise.
425      */
426     public void setCovarianceAdjusted(final boolean adjustCovariance) {
427         this.adjustCovariance = adjustCovariance;
428     }
429 
430     /**
431      * Fits a function to provided data so that parameters associated to that
432      * function can be estimated along with their covariance matrix and chi
433      * square value.
434      * If the chi square value is close to 1, the fit is usually good.
435      * If it is much larger, then the error cannot be properly fitted.
436      * If it is close to zero, then the model is overfitting the error.
437      * Methods {@link #getP()} and {@link #getQ()} can also be used to determine
438      * the quality of the fit.
439      *
440      * @throws FittingException  if fitting fails.
441      * @throws NotReadyException if enough input data has not yet been provided.
442      */
443     @Override
444     public void fit() throws FittingException, NotReadyException {
445         if (!isReady()) {
446             throw new NotReadyException();
447         }
448 
449         try {
450             resultAvailable = false;
451 
452             int j;
453             int k;
454             int l;
455             int iter;
456             int done = 0;
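            // alamda is the Levenberg-Marquardt damping factor: small values yield
            // Gauss-Newton-like steps, large values yield small gradient-descent-like steps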
457             double alamda = 0.001;
458             double ochisq;
459             final var atry = new double[ma];
460             final var beta = new double[ma];
461             final var da = new double[ma];
462 
463             // number of parameters to be fitted
464             mfit = 0;
465             for (j = 0; j < ma; j++) {
466                 if (ia[j]) {
467                     mfit++;
468                 }
469             }
470 
471             final var oneda = new Matrix(mfit, 1);
472             final var temp = new Matrix(mfit, mfit);
473 
474             // initialization
475             mrqcof(a, alpha, beta);
476             System.arraycopy(a, 0, atry, 0, ma);
477 
478             ochisq = chisq;
479             for (iter = 0; iter < itmax; iter++) {
480 
481                 if (done == ndone) {
482                     // last pass. Use zero alamda
483                     alamda = 0.0;
484                 }
485 
486                 for (j = 0; j < mfit; j++) {
487                     // alter linearized fitting matrix, by augmenting diagonal
488                     // elements
489                     for (k = 0; k < mfit; k++) {
490                         covar.setElementAt(j, k, alpha.getElementAt(j, k));
491                     }
492                     covar.setElementAt(j, j, alpha.getElementAt(j, j) * (1.0 + alamda));
493                     for (k = 0; k < mfit; k++) {
494                         temp.setElementAt(j, k, covar.getElementAt(j, k));
495                     }
496                     oneda.setElementAt(j, 0, beta[j]);
497                 }
498 
499                 // matrix solution
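                // Gauss-Jordan elimination solves (alpha + alamda * diag(alpha)) * da = beta;
                // oneda holds beta on input and the proposed parameter increments da on output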
500                 GaussJordanElimination.process(temp, oneda);
501 
502                 for (j = 0; j < mfit; j++) {
503                     for (k = 0; k < mfit; k++) {
504                         covar.setElementAt(j, k, temp.getElementAt(j, k));
505                     }
506                     da[j] = oneda.getElementAt(j, 0);
507                 }
508 
509                 if (done == ndone) {
510                     // Converged. Clean up and return
511                     covsrt(covar);
512                     covsrt(alpha);
513 
514                     if (adjustCovariance) {
515                         adjustCovariance();
516                     }
517 
518                     resultAvailable = true;
519 
520                     return;
521                 }
522 
523                 // did the trial succeed?
524                 for (j = 0, l = 0; l < ma; l++) {
525                     if (ia[l]) {
526                         atry[l] = a[l] + da[j++];
527                     }
528                 }
529 
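                // evaluate chi square at the trial parameters; covar and da are reused here as
                // scratch storage for the trial curvature matrix and the trial beta vector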
530                 mrqcof(atry, covar, da);
531                 if (Math.abs(chisq - ochisq) < Math.max(tol, tol * chisq)) {
532                     done++;
533                 }
534 
535                 if (chisq < ochisq) {
536                     // success, accept the new solution
537                     alamda *= 0.1;
538                     ochisq = chisq;
539                     for (j = 0; j < mfit; j++) {
540                         for (k = 0; k < mfit; k++) {
541                             alpha.setElementAt(j, k, covar.getElementAt(j, k));
542                         }
543                         beta[j] = da[j];
544                     }
545                     System.arraycopy(atry, 0, a, 0, ma);
546                 } else {
547                     // failure, increase alamda
548                     alamda *= 10.0;
549                     chisq = ochisq;
550                 }
551             }
552 
553             // too many iterations
554             throw new FittingException("too many iterations");
555 
556         } catch (final AlgebraException | EvaluationException e) {
557             throw new FittingException(e);
558         }
559     }
560 
561     /**
562      * Prevents parameter at position i of the array of function parameters
563      * from being modified during function fitting.
564      *
565      * @param i   position of parameter to be retained.
566      * @param val value to be set for parameter at position i.
567      */
568     public void hold(final int i, final double val) {
569         ia[i] = false;
570         a[i] = val;
571     }
572 
573     /**
574      * Releases parameter at position i of the array of function parameters,
575      * so it can be modified again if needed.
576      *
577      * @param i position of parameter to be released.
578      */
579     public void free(final int i) {
580         ia[i] = true;
581     }
582 
583     /**
584      * Adjusts covariance.
585      * Covariance must be adjusted to produce more realistic results close to the scale
586      * of the problem, otherwise the estimated covariance is just a measure of
587      * goodness similar to the chi square value, because it is the inverse of
588      * the curvature matrix, which only determines the covariance up to scale.
589      * <p>
590      * Covariance is adjusted taking into account the input samples, their standard
591      * deviations and the Jacobians of the model function with respect to the estimated
592      * parameters, using the expression: Cov = (J'*W*J)^-1, where:
593      * Cov is the covariance of the estimated parameters.
594      * J is a matrix containing the Jacobians of the function with respect to the
595      * estimated parameters evaluated at each input point x. Each row of J contains
596      * the Jacobian evaluated at the i-th input point x, and each column of J contains
597      * the partial derivative of the model function with respect to the j-th estimated parameter.
598      * W is the inverse of the input variances. It is a diagonal matrix containing the
599      * reciprocals of the input variances (squared input standard deviations). That is:
600      * W = diag(w), where the k-th element of w is wk = 1 / sigmak^2 and sigmak is the
601      * standard deviation of the k-th input sample.
602      *
603      * @throws AlgebraException    if there are numerical instabilities.
604      * @throws EvaluationException if function evaluation fails.
605      */
606     private void adjustCovariance() throws AlgebraException, EvaluationException {
607 
608         final var invCov = new Matrix(a.length, a.length);
609         final var tmp1 = new Matrix(a.length, 1);
610         final var tmp2 = new Matrix(1, a.length);
611         final var tmpInvCov = new Matrix(a.length, a.length);
612         final var derivatives = new double[a.length];
613         final var chiSqrDegreesOfFreedom = getChisqDegreesOfFreedom();
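        // accumulate J'*W*J one sample at a time: each term is the outer product of the
        // Jacobian row evaluated at x[i] with itself, weighted by
        // 1 / ((chiSqrDegreesOfFreedom + 1) * sig[i]^2)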
614         for (var i = 0; i < ndat; i++) {
615             evaluator.evaluate(i, x[i], a, derivatives);
616 
617             tmp1.fromArray(derivatives);
618             tmp2.fromArray(derivatives);
619 
620             tmp1.multiply(tmp2, tmpInvCov);
621 
622             final var w = 1.0 / ((chiSqrDegreesOfFreedom + 1) * sig[i] * sig[i]);
623             tmpInvCov.multiplyByScalar(w);
624             invCov.add(tmpInvCov);
625         }
626 
627         covar = Utils.inverse(invCov);
628     }
629 
630     /**
631      * Internal method to set function evaluator to evaluate function at a given
632      * point and obtain function derivatives with respect to each provided parameter.
633      *
634      * @param evaluator function evaluator
635      * @throws FittingException if evaluation fails
636      */
637     private void internalSetFunctionEvaluator(LevenbergMarquardtSingleDimensionFunctionEvaluator evaluator)
638             throws FittingException {
639 
640         try {
641             this.evaluator = evaluator;
642 
643             if (evaluator != null) {
644                 a = evaluator.createInitialParametersArray();
645                 ma = a.length;
646                 covar = new Matrix(ma, ma);
647                 alpha = new Matrix(ma, ma);
648                 ia = new boolean[ma];
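                // by default all parameters are free; hold(int, double) can later lock individual ones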
649                 Arrays.fill(ia, true);
650             }
651         } catch (final AlgebraException e) {
652             throw new FittingException(e);
653         }
654     }
655 
656     /**
657      * Used by {@link #fit()} to evaluate the linearized fitting matrix alpha and
658      * vector beta, and to calculate chi square.
659      *
660      * @param a     estimated parameters so far.
661      * @param alpha curvature (i.e. fitting) matrix.
662      * @param beta  array where derivative increments for each parameter are
663      *              stored.
664      * @throws EvaluationException if function evaluation fails.
665      */
666     private void mrqcof(final double[] a, final Matrix alpha, final double[] beta)
667             throws EvaluationException {
668 
669         int i;
670         int j;
671         int k;
672         int l;
673         int m;
674         double ymod;
675         double wt;
676         double sig2i;
677         double dy;
678         final var dyda = new double[ma];
679 
680         // initialize (symmetric) alpha, beta
681         for (j = 0; j < mfit; j++) {
682             for (k = 0; k <= j; k++) {
683                 alpha.setElementAt(j, k, 0.0);
684             }
685             beta[j] = 0.0;
686         }
687 
688         chisq = 0.0;
689         mse = 0.0;
690         final var degreesOfFreedom = getChisqDegreesOfFreedom();
691         for (i = 0; i < ndat; i++) {
692             // summation loop over all data
693             ymod = evaluator.evaluate(i, x[i], a, dyda);
694 
695             sig2i = 1.0 / (sig[i] * sig[i]);
696             dy = y[i] - ymod;
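            // accumulate the lower triangle of the curvature matrix alpha ~ J'*W*J and the
            // vector beta = J'*W*(y - f), skipping parameters that are held fixed (ia[l] == false)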
697             for (j = 0, l = 0; l < ma; l++) {
698                 if (ia[l]) {
699                     wt = dyda[l] * sig2i;
700                     final var alphaBuffer = alpha.getBuffer();
701                     for (k = 0, m = 0; m < l + 1; m++) {
702                         if (ia[m]) {
703                             final var index = alpha.getIndex(j, k++);
704                             alphaBuffer[index] += wt * dyda[m];
705                         }
706                     }
707                     beta[j++] += dy * wt;
708                 }
709             }
710 
711             // add to mse
712             mse += dy * dy / Math.abs(degreesOfFreedom);
713 
714             // and find chi square
715             chisq += dy * dy * sig2i / degreesOfFreedom;
716         }
717 
718         // fill in the symmetric side
719         for (j = 1; j < mfit; j++) {
720             for (k = 0; k < j; k++) {
721                 alpha.setElementAt(k, j, alpha.getElementAt(j, k));
722             }
723         }
724     }
725 
726     /**
727      * Expands in storage the covariance matrix covar to take into account
728      * parameters that are being held fixed. (For the latter, zero covariances
729      * are returned).
730      *
731      * @param covar covariance matrix.
732      */
733     private void covsrt(final Matrix covar) {
734         int i;
735         int j;
736         int k;
737         for (i = mfit; i < ma; i++) {
738             for (j = 0; j < i + 1; j++) {
739                 covar.setElementAt(i, j, 0.0);
740                 covar.setElementAt(j, i, 0.0);
741             }
742         }
743 
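        // swap rows and columns so that the compact mfit x mfit block of fitted covariances
        // ends up at the positions of the free parameters, leaving zeros for held ones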
744         k = mfit - 1;
745         for (j = ma - 1; j >= 0; j--) {
746             if (ia[j]) {
747                 final var buffer = covar.getBuffer();
748                 for (i = 0; i < ma; i++) {
749                     final var pos1 = covar.getIndex(i, k);
750                     final var pos2 = covar.getIndex(i, j);
751                     swap(buffer, buffer, pos1, pos2);
752                 }
753 
754                 for (i = 0; i < ma; i++) {
755                     final var pos1 = covar.getIndex(k, i);
756                     final var pos2 = covar.getIndex(j, i);
757                     swap(buffer, buffer, pos1, pos2);
758                 }
759 
760                 k--;
761             }
762         }
763     }
764 
765     /**
766      * Swaps values of arrays at provided positions.
767      *
768      * @param array1 1st array.
769      * @param array2 2nd array.
770      * @param pos1   1st position.
771      * @param pos2   2nd position.
772      */
773     private static void swap(final double[] array1, final double[] array2, final int pos1, final int pos2) {
774         final var value1 = array1[pos1];
775         final var value2 = array2[pos2];
776         array1[pos1] = value2;
777         array2[pos2] = value1;
778     }
779 }