/*
 * Copyright (C) 2015 Alberto Irurueta Carro (alberto@irurueta.com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *         http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.irurueta.numerical.fitting;

import com.irurueta.algebra.AlgebraException;
import com.irurueta.algebra.GaussJordanElimination;
import com.irurueta.algebra.Matrix;
import com.irurueta.algebra.Utils;
import com.irurueta.numerical.EvaluationException;
import com.irurueta.numerical.NotReadyException;
import com.irurueta.statistics.ChiSqDist;
import com.irurueta.statistics.MaxIterationsExceededException;

import java.util.Arrays;

/**
 * Fits provided data (x, y) to a generic non-linear function using the
 * Levenberg-Marquardt iterative algorithm.
 * This class is based on the implementation available at Numerical Recipes 3rd
 * Ed, page 801.
 */
@SuppressWarnings("DuplicatedCode")
public class LevenbergMarquardtMultiDimensionFitter extends MultiDimensionFitter {
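
    // Illustrative usage sketch (not part of the original implementation). It assumes a
    // hypothetical MyEvaluator class implementing LevenbergMarquardtMultiDimensionFunctionEvaluator,
    // and the getA(), getCovar() and getChisq() accessors inherited from the base fitter class:
    //
    //     final var fitter = new LevenbergMarquardtMultiDimensionFitter(new MyEvaluator(), x, y, 1.0);
    //     fitter.fit();
    //     final double[] params = fitter.getA();     // estimated parameters
    //     final Matrix cov = fitter.getCovar();      // covariance of estimated parameters
    //     final double chiSq = fitter.getChisq();    // chi square of residuals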

    /**
     * Default convergence parameter. Number of times that tolerance must be
     * reached to consider that the algorithm has finished iterating.
     */
    public static final int DEFAULT_NDONE = 4;

    /**
     * Default maximum number of iterations.
     */
    public static final int DEFAULT_ITMAX = 5000;

    /**
     * Default tolerance to reach convergence.
     */
    public static final double DEFAULT_TOL = 1e-3;

    /**
     * Indicates whether covariance must be adjusted or not after fitting is finished.
     */
    public static final boolean DEFAULT_ADJUST_COVARIANCE = true;

    /**
     * Convergence parameter.
     */
    private int ndone = DEFAULT_NDONE;

    /**
     * Maximum number of iterations.
     */
    private int itmax = DEFAULT_ITMAX;

    /**
     * Tolerance to reach convergence.
     */
    private double tol = DEFAULT_TOL;

    /**
     * Evaluator of functions.
     */
    private LevenbergMarquardtMultiDimensionFunctionEvaluator evaluator;

    /**
     * Number of function parameters to be estimated.
     */
    private int ma;

    /**
     * Determines which parameters can be modified during estimation (if true)
     * and which ones are locked (if false).
     */
    private boolean[] ia;

    /**
     * Curvature matrix.
     */
    private Matrix alpha;

    /**
     * Number of parameters to be fitted.
     */
    private int mfit = 0;

    /**
     * An input point to be evaluated.
     * This is internally reused for memory efficiency.
     */
    private double[] xRow;

    /**
     * Mean square error.
     */
    private double mse = 0.0;

    /**
     * Indicates whether covariance must be adjusted or not.
     * When covariance adjustment is enabled, covariance is recomputed taking
     * into account input samples, input standard deviations of the samples and
     * Jacobians of the model function with respect to estimated parameters using the
     * following expression: Cov = (J'*W*J)^-1, where:
     * Cov is the covariance of estimated parameters,
     * J is a matrix containing the Jacobians of the function with respect to estimated
     * parameters for each input point x. Each row of the J matrix contains an evaluation
     * of the model function Jacobian for the i-th input point x. Each column of the J
     * matrix contains the partial derivative of the model function with respect to the
     * j-th estimated parameter.
     * W is the inverse of input variances. It's a diagonal matrix containing the
     * reciprocal of the input variances (squared input standard deviations). That is:
     * W = diag(w), where the k-th element of w is wk = 1 / sigmak^2 and sigmak is the
     * standard deviation of the k-th input sample.
     * By default, covariance is adjusted after fitting finishes.
     */
    private boolean adjustCovariance = DEFAULT_ADJUST_COVARIANCE;

    /**
     * Constructor.
     */
    public LevenbergMarquardtMultiDimensionFitter() {
        super();
    }

    /**
     * Constructor.
     *
     * @param x   input points x where function f(x1, x2, ...) is evaluated.
     * @param y   result of evaluation of the non-linear multidimensional function
     *            f(x1, x2, ...) at provided multidimensional x points.
     * @param sig standard deviations of each pair of points (x, y).
     * @throws IllegalArgumentException if provided arrays don't have the same
     *                                  length.
     */
    public LevenbergMarquardtMultiDimensionFitter(final Matrix x, final double[] y, final double[] sig) {
        super(x, y, sig);
    }

    /**
     * Constructor.
     *
     * @param x   input points x where function f(x1, x2, ...) is evaluated.
     * @param y   result of evaluation of the non-linear multidimensional function
     *            f(x1, x2, ...) at provided multidimensional x points.
     * @param sig standard deviation of all pairs of points assuming that
     *            standard deviations are constant.
     * @throws IllegalArgumentException if provided arrays don't have the same
     *                                  length.
     */
    public LevenbergMarquardtMultiDimensionFitter(final Matrix x, final double[] y, final double sig) {
        super(x, y, sig);
    }

    /**
     * Constructor.
     *
     * @param evaluator evaluator to evaluate function at provided point and
     *                  obtain function derivatives with respect to each estimated
     *                  parameter at such point.
     * @throws FittingException if evaluation fails.
     */
    public LevenbergMarquardtMultiDimensionFitter(
            final LevenbergMarquardtMultiDimensionFunctionEvaluator evaluator) throws FittingException {
        this();
        setFunctionEvaluator(evaluator);
    }

    /**
     * Constructor.
     *
     * @param evaluator evaluator to evaluate function at provided point and
     *                  obtain function derivatives with respect to each estimated
     *                  parameter at such point.
     * @param x         input points x where function f(x1, x2, ...) is evaluated.
     * @param y         result of evaluation of the non-linear multidimensional function
     *                  f(x1, x2, ...) at provided multidimensional x points.
     * @param sig       standard deviations of each pair of points (x, y).
     * @throws IllegalArgumentException if provided arrays don't have the same
     *                                  length.
     * @throws FittingException         if evaluation fails.
     */
    public LevenbergMarquardtMultiDimensionFitter(
            final LevenbergMarquardtMultiDimensionFunctionEvaluator evaluator,
            final Matrix x, final double[] y, final double[] sig) throws FittingException {
        this(x, y, sig);
        setFunctionEvaluator(evaluator);
    }

    /**
     * Constructor.
     *
     * @param evaluator evaluator to evaluate function at provided point and
     *                  obtain function derivatives with respect to each estimated
     *                  parameter at such point.
     * @param x         input points x where function f(x1, x2, ...) is evaluated.
     * @param y         result of evaluation of the non-linear multidimensional function
     *                  f(x1, x2, ...) at provided multidimensional x points.
     * @param sig       standard deviation of all pairs of points assuming that
     *                  standard deviations are constant.
     * @throws IllegalArgumentException if provided arrays don't have the same
     *                                  length.
     * @throws FittingException         if evaluation fails.
     */
    public LevenbergMarquardtMultiDimensionFitter(
            final LevenbergMarquardtMultiDimensionFunctionEvaluator evaluator,
            final Matrix x, final double[] y, final double sig) throws FittingException {
        this(x, y, sig);
        setFunctionEvaluator(evaluator);
    }

    /**
     * Returns convergence parameter.
     *
     * @return convergence parameter.
     */
    public int getNdone() {
        return ndone;
    }

    /**
     * Sets convergence parameter.
     *
     * @param ndone convergence parameter.
     * @throws IllegalArgumentException if provided value is less than 1.
     */
    public void setNdone(final int ndone) {
        if (ndone < 1) {
            throw new IllegalArgumentException();
        }
        this.ndone = ndone;
    }

    /**
     * Returns maximum number of iterations.
     *
     * @return maximum number of iterations.
     */
    public int getItmax() {
        return itmax;
    }

    /**
     * Sets maximum number of iterations.
     *
     * @param itmax maximum number of iterations.
     * @throws IllegalArgumentException if provided value is zero or negative.
     */
    public void setItmax(final int itmax) {
        if (itmax <= 0) {
            throw new IllegalArgumentException();
        }
        this.itmax = itmax;
    }

    /**
     * Returns tolerance to reach convergence.
     *
     * @return tolerance to reach convergence.
     */
    public double getTol() {
        return tol;
    }

    /**
     * Sets tolerance to reach convergence.
     *
     * @param tol tolerance to reach convergence.
     * @throws IllegalArgumentException if provided value is zero or negative.
     */
    public void setTol(final double tol) {
        if (tol <= 0.0) {
            throw new IllegalArgumentException();
        }
        this.tol = tol;
    }

    /**
     * Returns function evaluator to evaluate function at a given point and
     * obtain function derivatives with respect to each provided parameter.
     *
     * @return function evaluator.
     */
    public LevenbergMarquardtMultiDimensionFunctionEvaluator getFunctionEvaluator() {
        return evaluator;
    }

    /**
     * Sets function evaluator to evaluate function at a given point and obtain
     * function derivatives with respect to each provided parameter.
     *
     * @param evaluator function evaluator.
     * @throws FittingException if evaluation fails.
     */
    public final void setFunctionEvaluator(
            final LevenbergMarquardtMultiDimensionFunctionEvaluator evaluator) throws FittingException {
        internalSetFunctionEvaluator(evaluator);
    }

    /**
     * Indicates whether provided instance has enough data to start the function
     * fitting.
     *
     * @return true if this instance is ready to start the function fitting,
     * false otherwise.
     */
    @Override
    public boolean isReady() {
        return evaluator != null && x != null && y != null && x.getRows() == y.length
                && x.getColumns() == evaluator.getNumberOfDimensions();
    }

    /**
     * Returns curvature matrix.
     *
     * @return curvature matrix.
     */
    public Matrix getAlpha() {
        return alpha;
    }

    /**
     * Returns degrees of freedom of computed chi square value.
     * Degrees of freedom are equal to the number of sampled data points minus the
     * number of estimated parameters.
     *
     * @return degrees of freedom of computed chi square value.
     */
    public int getChisqDegreesOfFreedom() {
        return ndat - ma;
    }

    /**
     * Gets mean square error produced by estimated parameters with respect to
     * provided sample data.
     *
     * @return mean square error.
     */
    public double getMse() {
        return mse;
    }

    /**
     * Gets the probability of finding a smaller chi square value.
     * The smaller the found chi square value is, the better the estimated
     * parameters fit the sampled data.
     * Thus, the smaller the probability of finding a smaller chi square value is,
     * the better the estimated fit is.
     *
     * @return probability of finding a smaller chi square value (better fit), expressed
     * as a value between 0.0 and 1.0.
     * @throws MaxIterationsExceededException if convergence of incomplete
     *                                        gamma function cannot be reached. This is rarely thrown and happens
     *                                        usually for numerically unstable input values.
     */
    public double getP() throws MaxIterationsExceededException {
        return ChiSqDist.cdf(getChisq(), getChisqDegreesOfFreedom());
    }

    /**
     * Gets a measure of quality of estimated fit as a value between 0.0 and 1.0.
     * The larger the quality value is, the better the fit that has been estimated.
     *
     * @return measure of quality of estimated fit.
     * @throws MaxIterationsExceededException if convergence of incomplete
     *                                        gamma function cannot be reached. This is rarely thrown and happens
     *                                        usually for numerically unstable input values.
     */
    public double getQ() throws MaxIterationsExceededException {
        return 1.0 - getP();
    }

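    // Illustrative note (not part of the original implementation): after fit() succeeds, getP()
    // and getQ() can be used to assess the estimated fit, e.g.:
    //
    //     final double q = fitter.getQ();  // in [0.0, 1.0]; values close to 1.0 indicate a better fit
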
    /**
     * Indicates whether covariance must be adjusted or not.
     * When covariance adjustment is enabled, covariance is recomputed taking
     * into account input samples, input standard deviations of the samples and
     * Jacobians of the model function with respect to estimated parameters using the
     * following expression: Cov = (J'*W*J)^-1, where:
     * Cov is the covariance of estimated parameters,
     * J is a matrix containing the Jacobians of the function with respect to estimated
     * parameters for each input point x. Each row of the J matrix contains an evaluation
     * of the model function Jacobian for the i-th input point x. Each column of the J
     * matrix contains the partial derivative of the model function with respect to the
     * j-th estimated parameter.
     * W is the inverse of input variances. It's a diagonal matrix containing the
     * reciprocal of the input variances (squared input standard deviations). That is:
     * W = diag(w), where the k-th element of w is wk = 1 / sigmak^2 and sigmak is the
     * standard deviation of the k-th input sample.
     * By default, covariance is adjusted after fitting finishes.
     * <a href="http://people.duke.edu/~hpgavin/ce281/lm.pdf">http://people.duke.edu/~hpgavin/ce281/lm.pdf</a>
     * <a href="https://www8.cs.umu.se/kurser/5DA001/HT07/lectures/lsq-handouts.pdf">https://www8.cs.umu.se/kurser/5DA001/HT07/lectures/lsq-handouts.pdf</a>
     * Numerical Recipes 3rd Ed, page 812
     *
     * @return true if covariance must be adjusted, false otherwise.
     */
    public boolean isCovarianceAdjusted() {
        return adjustCovariance;
    }

    /**
     * Specifies whether covariance must be adjusted or not.
     * When covariance adjustment is enabled, covariance is recomputed taking
     * into account input samples, input standard deviations of the samples and
     * Jacobians of the model function with respect to estimated parameters using the
     * following expression: Cov = (J'*W*J)^-1, where:
     * Cov is the covariance of estimated parameters,
     * J is a matrix containing the Jacobians of the function with respect to estimated
     * parameters for each input point x. Each row of the J matrix contains an evaluation
     * of the model function Jacobian for the i-th input point x. Each column of the J
     * matrix contains the partial derivative of the model function with respect to the
     * j-th estimated parameter.
     * W is the inverse of input variances. It's a diagonal matrix containing the
     * reciprocal of the input variances (squared input standard deviations). That is:
     * W = diag(w), where the k-th element of w is wk = 1 / sigmak^2 and sigmak is the
     * standard deviation of the k-th input sample.
     * By default, covariance is adjusted after fitting finishes.
     *
     * @param adjustCovariance true if covariance must be adjusted, false otherwise.
     */
    public void setCovarianceAdjusted(final boolean adjustCovariance) {
        this.adjustCovariance = adjustCovariance;
    }

    /**
     * Fits a function to provided data so that parameters associated to that
     * function can be estimated along with their covariance matrix and chi
     * square value.
     * If the chi square value is close to 1, the fit is usually good.
     * If it is much larger, then the error cannot be properly fitted.
     * If it is close to zero, then the model over-fits the error.
     * Methods {@link #getP()} and {@link #getQ()} can also be used to determine
     * the quality of the fit.
     *
     * @throws FittingException  if fitting fails.
     * @throws NotReadyException if enough input data has not yet been provided.
     */
    @Override
    public void fit() throws FittingException, NotReadyException {
        if (!isReady()) {
            throw new NotReadyException();
        }

        try {
            resultAvailable = false;

            int j;
            int k;
            int l;
            int iter;
            int done = 0;
            var alamda = 0.001;
            double ochisq;
            final var atry = new double[ma];
            final var beta = new double[ma];
            final var da = new double[ma];

            // number of parameters to be fitted
            mfit = 0;
            for (j = 0; j < ma; j++) {
                if (ia[j]) {
                    mfit++;
                }
            }

            final var oneda = new Matrix(mfit, 1);
            final var temp = new Matrix(mfit, mfit);

            // initialization
            mrqcof(a, alpha, beta);
            System.arraycopy(a, 0, atry, 0, ma);

            ochisq = chisq;
            for (iter = 0; iter < itmax; iter++) {
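                // Each iteration solves the augmented normal equations
                //     (alpha + alamda * diag(alpha)) * da = beta
                // for the parameter increments da. If the trial parameters reduce chi square,
                // the step is accepted and alamda is decreased; otherwise the step is rejected
                // and alamda is increased (descriptive comment added for clarity).
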
                if (done == ndone) {
                    // last pass. Use zero alamda
                    alamda = 0.0;
                }

                for (j = 0; j < mfit; j++) {
                    // alter linearized fitting matrix, by augmenting diagonal
                    // elements
                    for (k = 0; k < mfit; k++) {
                        covar.setElementAt(j, k, alpha.getElementAt(j, k));
                    }
                    covar.setElementAt(j, j, alpha.getElementAt(j, j) * (1.0 + alamda));
                    for (k = 0; k < mfit; k++) {
                        temp.setElementAt(j, k, covar.getElementAt(j, k));
                    }
                    oneda.setElementAt(j, 0, beta[j]);
                }

                // matrix solution
                GaussJordanElimination.process(temp, oneda);

                for (j = 0; j < mfit; j++) {
                    for (k = 0; k < mfit; k++) {
                        covar.setElementAt(j, k, temp.getElementAt(j, k));
                    }
                    da[j] = oneda.getElementAt(j, 0);
                }

                if (done == ndone) {
                    // Converged. Clean up and return
                    covsrt(covar);
                    covsrt(alpha);

                    if (adjustCovariance) {
                        adjustCovariance();
                    }

                    resultAvailable = true;

                    return;
                }

                // did the trial succeed?
                for (j = 0, l = 0; l < ma; l++) {
                    if (ia[l]) {
                        atry[l] = a[l] + da[j++];
                    }
                }

                mrqcof(atry, covar, da);
                if (Math.abs(chisq - ochisq) < Math.max(tol, tol * chisq)) {
                    done++;
                }

                if (chisq < ochisq) {
                    // success, accept the new solution
                    alamda *= 0.1;
                    ochisq = chisq;
                    for (j = 0; j < mfit; j++) {
                        for (k = 0; k < mfit; k++) {
                            alpha.setElementAt(j, k, covar.getElementAt(j, k));
                        }
                        beta[j] = da[j];
                    }
                    System.arraycopy(atry, 0, a, 0, ma);
                } else {
                    // failure, increase alamda
                    alamda *= 10.0;
                    chisq = ochisq;
                }
            }

            // too many iterations
            throw new FittingException("too many iterations");

        } catch (final AlgebraException | EvaluationException e) {
            throw new FittingException(e);
        }
    }

    /**
     * Prevents parameter at position i of the array of estimated parameters from
     * being modified during function fitting.
     *
     * @param i   position of parameter to be retained.
     * @param val value to be set for parameter at position i.
     */
    public void hold(final int i, final double val) {
        ia[i] = false;
        a[i] = val;
    }

    /**
     * Releases parameter at position i of the array of estimated parameters,
     * so it can be modified again if needed.
     *
     * @param i position of parameter to be released.
     */
    public void free(final int i) {
        ia[i] = true;
    }

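    // Illustrative usage sketch (not part of the original implementation): to keep, for instance,
    // the 2nd parameter fixed to 1.0 while fitting the remaining ones:
    //
    //     fitter.hold(1, 1.0);
    //     fitter.fit();
    //     fitter.free(1);
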
    /**
     * Adjusts covariance.
     * Covariance must be adjusted to produce more realistic results close to the scale
     * of the problem, otherwise estimated covariance will just be a measure of
     * goodness similar to chi square value because it will be the inverse of
     * the curvature matrix, which is just a solution of the covariance up to scale.
     * <p>
     * Covariance is adjusted taking into account input samples, input standard
     * deviations of the samples and Jacobians of the model function with respect to
     * estimated parameters using the following expression: Cov = (J'*W*J)^-1, where:
     * Cov is the covariance of estimated parameters,
     * J is a matrix containing the Jacobians of the function with respect to estimated
     * parameters for each input point x. Each row of the J matrix contains an evaluation
     * of the model function Jacobian for the i-th input point x. Each column of the J
     * matrix contains the partial derivative of the model function with respect to the
     * j-th estimated parameter.
     * W is the inverse of input variances. It's a diagonal matrix containing the
     * reciprocal of the input variances (squared input standard deviations). That is:
     * W = diag(w), where the k-th element of w is wk = 1 / sigmak^2 and sigmak is the
     * standard deviation of the k-th input sample.
     *
     * @throws AlgebraException    if there are numerical instabilities.
     * @throws EvaluationException if function evaluation fails.
     */
    private void adjustCovariance() throws AlgebraException, EvaluationException {

        final var xCols = x.getColumns();
        if (xRow == null) {
            xRow = new double[x.getColumns()];
        }

        final var invCov = new Matrix(a.length, a.length);
        final var tmp1 = new Matrix(a.length, 1);
        final var tmp2 = new Matrix(1, a.length);
        final var tmpInvCov = new Matrix(a.length, a.length);
        final var derivatives = new double[a.length];
        final var chiSqrDegreesOfFreedom = getChisqDegreesOfFreedom();
        for (var i = 0; i < ndat; i++) {
            x.getSubmatrixAsArray(i, 0, i, xCols - 1, xRow);

            evaluator.evaluate(i, xRow, a, derivatives);

            tmp1.fromArray(derivatives);
            tmp2.fromArray(derivatives);

            tmp1.multiply(tmp2, tmpInvCov);

            final var w = 1.0 / ((chiSqrDegreesOfFreedom + 1) * sig[i] * sig[i]);
            tmpInvCov.multiplyByScalar(w);
            invCov.add(tmpInvCov);
        }

        covar = Utils.inverse(invCov);
    }

    /**
     * Internal method to set function evaluator to evaluate function at a given
     * point and obtain function derivatives with respect to each provided parameter.
     *
     * @param evaluator function evaluator.
     * @throws FittingException if evaluation fails.
     */
    private void internalSetFunctionEvaluator(final LevenbergMarquardtMultiDimensionFunctionEvaluator evaluator)
            throws FittingException {

        try {
            this.evaluator = evaluator;

            if (evaluator != null) {
                a = evaluator.createInitialParametersArray();
                ma = a.length;
                covar = new Matrix(ma, ma);
                alpha = new Matrix(ma, ma);
                ia = new boolean[ma];
                Arrays.fill(ia, true);
            }
        } catch (final AlgebraException e) {
            throw new FittingException(e);
        }
    }

    /**
     * Used by fit to evaluate the linearized fitting matrix alpha and vector
     * beta, and to calculate chi square.
     *
     * @param a     estimated parameters so far.
     * @param alpha curvature (i.e. fitting) matrix.
     * @param beta  array where derivative increments for each parameter are
     *              stored.
     * @throws AlgebraException    if there are numerical instabilities.
     * @throws EvaluationException if function evaluation fails.
     */
    private void mrqcof(final double[] a, final Matrix alpha, final double[] beta)
            throws AlgebraException, EvaluationException {
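        // For each sample i, this accumulates (descriptive comment added for clarity):
        //     alpha[j][k] += (1 / sig[i]^2) * dyda[j] * dyda[k]
        //     beta[j]     += (1 / sig[i]^2) * (y[i] - ymod) * dyda[j]
        // over the free parameters only, where dyda are the partial derivatives of the
        // model function with respect to the estimated parameters.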
        int i;
        int j;
        int k;
        int l;
        int m;
        double ymod;
        double wt;
        double sig2i;
        double dy;
        final var dyda = new double[ma];

        if (xRow == null) {
            xRow = new double[x.getColumns()];
        }
        final var xCols = evaluator.getNumberOfDimensions();

        // initialize (symmetric) alpha, beta
        for (j = 0; j < mfit; j++) {
            for (k = 0; k <= j; k++) {
                alpha.setElementAt(j, k, 0.0);
            }
            beta[j] = 0.;
        }

        chisq = 0.;
        mse = 0.0;
        final var degreesOfFreedom = getChisqDegreesOfFreedom();
        for (i = 0; i < ndat; i++) {
            // summation loop over all data
            x.getSubmatrixAsArray(i, 0, i, xCols - 1, xRow);
            ymod = evaluator.evaluate(i, xRow, a, dyda);

            sig2i = 1.0 / (sig[i] * sig[i]);
            dy = y[i] - ymod;
            for (j = 0, l = 0; l < ma; l++) {
                if (ia[l]) {
                    wt = dyda[l] * sig2i;
                    final var alphaBuffer = alpha.getBuffer();
                    for (k = 0, m = 0; m < l + 1; m++) {
                        if (ia[m]) {
                            final int index = alpha.getIndex(j, k++);
                            alphaBuffer[index] += wt * dyda[m];
                        }
                    }
                    beta[j++] += dy * wt;
                }
            }

            // add to mse
            mse += dy * dy / Math.abs(degreesOfFreedom);

            // and find chi square
            chisq += dy * dy * sig2i / degreesOfFreedom;
        }

        // fill in the symmetric side
        for (j = 1; j < mfit; j++) {
            for (k = 0; k < j; k++) {
                alpha.setElementAt(k, j, alpha.getElementAt(j, k));
            }
        }
    }

    /**
     * Expands in storage the covariance matrix covar to take into account
     * parameters that are being held fixed (for the latter, zero covariances
     * are returned).
     *
     * @param covar covariance matrix.
     */
    private void covsrt(final Matrix covar) {
        int i;
        int j;
        int k;
        for (i = mfit; i < ma; i++) {
            for (j = 0; j < i + 1; j++) {
                covar.setElementAt(i, j, 0.0);
                covar.setElementAt(j, i, 0.0);
            }
        }

        k = mfit - 1;
        for (j = ma - 1; j >= 0; j--) {
            if (ia[j]) {
                final var buffer = covar.getBuffer();
                for (i = 0; i < ma; i++) {
                    final var pos1 = covar.getIndex(i, k);
                    final var pos2 = covar.getIndex(i, j);
                    swap(buffer, buffer, pos1, pos2);
                }
                for (i = 0; i < ma; i++) {
                    final var pos1 = covar.getIndex(k, i);
                    final var pos2 = covar.getIndex(j, i);
                    swap(buffer, buffer, pos1, pos2);
                }

                k--;
            }
        }
    }

    /**
     * Swaps values of arrays at provided positions.
     *
     * @param array1 1st array.
     * @param array2 2nd array.
     * @param pos1   1st position.
     * @param pos2   2nd position.
     */
    private static void swap(final double[] array1, final double[] array2, final int pos1, final int pos2) {
        final var value1 = array1[pos1];
        final var value2 = array2[pos2];
        array1[pos1] = value2;
        array2[pos2] = value1;
    }
}