/*
 * Copyright (C) 2015 Alberto Irurueta Carro (alberto@irurueta.com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *         http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.irurueta.numerical.fitting;

import com.irurueta.algebra.AlgebraException;
import com.irurueta.algebra.GaussJordanElimination;
import com.irurueta.algebra.Matrix;
import com.irurueta.algebra.Utils;
import com.irurueta.numerical.EvaluationException;
import com.irurueta.numerical.NotReadyException;
import com.irurueta.statistics.ChiSqDist;
import com.irurueta.statistics.MaxIterationsExceededException;

import java.util.Arrays;

/**
 * Fits provided data (x, y) to a generic non-linear function using the
 * Levenberg-Marquardt iterative algorithm.
 * This class is based on the implementation available at Numerical Recipes 3rd
 * Ed, page 801.
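 * <p>
 * The sketch below shows one possible way of using this fitter. The two-output
 * quadratic model and the data layout are illustrative only, and it is assumed
 * that the evaluator interface only requires the methods used by this class and
 * that estimated parameters can be retrieved through {@code getA()} on the base
 * fitter:
 * <pre>{@code
 * public static double[] fitQuadraticModel(final Matrix x, final Matrix y)
 *         throws FittingException, NotReadyException {
 *     // model with one input dimension and two output variables:
 *     // f1(x) = a * x and f2(x) = b * x * x, with parameters [a, b]
 *     final var evaluator = new LevenbergMarquardtMultiVariateFunctionEvaluator() {
 *         public int getNumberOfDimensions() {
 *             return 1;
 *         }
 *
 *         public int getNumberOfVariables() {
 *             return 2;
 *         }
 *
 *         public double[] createInitialParametersArray() {
 *             // initial guess for [a, b]
 *             return new double[]{1.0, 1.0};
 *         }
 *
 *         public void evaluate(final int i, final double[] point, final double[] result,
 *                              final double[] params, final Matrix jacobian) {
 *             final var v = point[0];
 *             result[0] = params[0] * v;
 *             result[1] = params[1] * v * v;
 *             // row n contains the partial derivatives of the n-th output
 *             // variable with respect to each estimated parameter
 *             jacobian.setElementAt(0, 0, v);
 *             jacobian.setElementAt(0, 1, 0.0);
 *             jacobian.setElementAt(1, 0, 0.0);
 *             jacobian.setElementAt(1, 1, v * v);
 *         }
 *     };
 *
 *     // x contains one sample per row (1 column), y the measured outputs
 *     // (2 columns), and all samples are assumed to share unit standard deviation
 *     final var fitter = new LevenbergMarquardtMultiVariateFitter(evaluator, x, y, 1.0);
 *     fitter.fit();
 *     // estimated parameters, covariance and chi square are then available
 *     // through the accessors of this class and its base classes
 *     return fitter.getA();
 * }
 * }</pre>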
 */
@SuppressWarnings("DuplicatedCode")
public class LevenbergMarquardtMultiVariateFitter extends MultiVariateFitter {

    /**
     * Default convergence parameter. Number of times that tolerance is assumed
     * to be reached to consider that the algorithm has finished iterating.
     */
    public static final int DEFAULT_NDONE = 4;

    /**
     * Default maximum number of iterations.
     */
    public static final int DEFAULT_ITMAX = 5000;

    /**
     * Default tolerance to reach convergence.
     */
    public static final double DEFAULT_TOL = 1e-3;

    /**
     * Indicates whether covariance must be adjusted or not after fitting is finished.
     */
    public static final boolean DEFAULT_ADJUST_COVARIANCE = true;

    /**
     * Convergence parameter.
     */
    private int ndone = DEFAULT_NDONE;

    /**
     * Maximum number of iterations.
     */
    private int itmax = DEFAULT_ITMAX;

    /**
     * Tolerance to reach convergence.
     */
    private double tol = DEFAULT_TOL;

    /**
     * Evaluator of functions.
     */
    private LevenbergMarquardtMultiVariateFunctionEvaluator evaluator;

    /**
     * Number of function parameters to be estimated.
     */
    private int ma;

    /**
     * Determines which parameters can be modified during estimation (if true)
     * and which ones are locked (if false).
     */
    private boolean[] ia;

    /**
     * Curvature matrix.
     */
    private Matrix alpha;

    /**
     * Number of parameters to be fitted.
     */
    private int mfit = 0;

    /**
     * An input point to be evaluated.
     */
    private double[] xRow;

    /**
     * Results of function evaluations.
     */
    private double[] ymod;

    /**
     * Jacobian of function at a given point.
     */
    Matrix jacobian;

    /**
     * Mean square error.
     */
    private double mse = 0.0;

    /**
     * Indicates whether covariance must be adjusted or not.
     * When covariance adjustment is enabled, covariance is recomputed taking
     * into account input samples, input standard deviations of the samples and
     * Jacobians of the model function with respect to the estimated parameters
     * using the following expression: Cov = (J'*W*J)^-1, where:
     * Cov is the covariance of estimated parameters,
     * J is a matrix containing the Jacobians of the function with respect to
     * the estimated parameters evaluated at each input point x. Each row of the
     * J matrix contains the model function Jacobian evaluated at the i-th input
     * point x. Each column of the J matrix contains the partial derivatives of
     * the model function with respect to the j-th estimated parameter.
     * W is the inverse of the input variances. It is a diagonal matrix
     * containing the reciprocals of the input variances (squared input standard
     * deviations), that is: W = diag(w), where the k-th element of w is
     * wk = 1 / sigmak^2, sigmak being the standard deviation of the k-th input
     * sample.
     * By default, covariance is adjusted after fitting finishes.
     */
    private boolean adjustCovariance = DEFAULT_ADJUST_COVARIANCE;

    /**
     * Constructor.
     */
    public LevenbergMarquardtMultiVariateFitter() {
        super();
    }

    /**
     * Constructor.
     *
     * @param x input points x where multivariate function f(x1, x2, ...) is
     * evaluated.
     * @param y result of evaluation of multivariate function f(x1, x2, ...)
     * at provided multidimensional x points.
     * @param sig standard deviations of each pair of points (x, y).
     * @throws IllegalArgumentException if provided matrices and array don't
     * have the same number of rows.
     */
    public LevenbergMarquardtMultiVariateFitter(final Matrix x, final Matrix y, final double[] sig) {
        super(x, y, sig);
    }

    /**
     * Constructor.
     *
     * @param x input points x where multivariate function f(x1, x2, ...) is
     * evaluated.
     * @param y result of evaluation of multivariate function f(x1, x2, ...)
     * at provided multidimensional x points.
     * @param sig standard deviation of all pairs of points assuming that
     * standard deviations are constant.
     * @throws IllegalArgumentException if provided matrices don't have the same
     * number of rows.
     */
    public LevenbergMarquardtMultiVariateFitter(final Matrix x, final Matrix y, final double sig) {
        super(x, y, sig);
    }

    /**
     * Constructor.
     *
     * @param evaluator evaluator to evaluate function at provided point and
     * obtain the evaluation of function basis at such point.
     * @throws FittingException if evaluation fails.
     */
    public LevenbergMarquardtMultiVariateFitter(
            final LevenbergMarquardtMultiVariateFunctionEvaluator evaluator) throws FittingException {
        this();
        setFunctionEvaluator(evaluator);
    }

    /**
     * Constructor.
     *
     * @param evaluator evaluator to evaluate multivariate function at provided
     * point and obtain the evaluation of function basis at such point.
     * @param x input points x where multivariate function f(x1, x2, ...) is
     * evaluated.
     * @param y result of evaluation of multivariate function f(x1, x2, ...)
     * at provided multidimensional x points.
     * @param sig standard deviations of each pair of points (x, y).
     * @throws IllegalArgumentException if provided matrices and array don't
     * have the same number of rows.
     * @throws FittingException if evaluation fails.
     */
    public LevenbergMarquardtMultiVariateFitter(
            final LevenbergMarquardtMultiVariateFunctionEvaluator evaluator, final Matrix x, final Matrix y,
            final double[] sig) throws FittingException {
        this(x, y, sig);
        setFunctionEvaluator(evaluator);
    }

    /**
     * Constructor.
     *
     * @param evaluator evaluator to evaluate multivariate function at provided
     * point and obtain the evaluation of function basis at such point.
     * @param x input points x where multivariate function f(x1, x2, ...) is
     * evaluated.
     * @param y result of evaluation of multivariate function f(x1, x2, ...)
     * at provided multidimensional x points.
     * @param sig standard deviation of all pairs of points assuming that
     * standard deviations are constant.
     * @throws IllegalArgumentException if provided matrices don't have the same
     * number of rows.
     * @throws FittingException if evaluation fails.
     */
    public LevenbergMarquardtMultiVariateFitter(
            final LevenbergMarquardtMultiVariateFunctionEvaluator evaluator, final Matrix x, final Matrix y,
            final double sig) throws FittingException {
        this(x, y, sig);
        setFunctionEvaluator(evaluator);
    }

    /**
     * Returns convergence parameter.
     *
     * @return convergence parameter.
     */
    public int getNdone() {
        return ndone;
    }

    /**
     * Sets convergence parameter.
     *
     * @param ndone convergence parameter.
     * @throws IllegalArgumentException if provided value is less than 1.
     */
    public void setNdone(final int ndone) {
        if (ndone < 1) {
            throw new IllegalArgumentException();
        }
        this.ndone = ndone;
    }

    /**
     * Returns maximum number of iterations.
     *
     * @return maximum number of iterations.
     */
    public int getItmax() {
        return itmax;
    }

    /**
     * Sets maximum number of iterations.
     *
     * @param itmax maximum number of iterations.
     * @throws IllegalArgumentException if provided value is zero or negative.
     */
    public void setItmax(final int itmax) {
        if (itmax <= 0) {
            throw new IllegalArgumentException();
        }
        this.itmax = itmax;
    }

    /**
     * Returns tolerance to reach convergence.
     *
     * @return tolerance to reach convergence.
     */
    public double getTol() {
        return tol;
    }

    /**
     * Sets tolerance to reach convergence.
     *
     * @param tol tolerance to reach convergence.
     * @throws IllegalArgumentException if provided value is zero or negative.
     */
    public void setTol(final double tol) {
        if (tol <= 0.0) {
            throw new IllegalArgumentException();
        }
        this.tol = tol;
    }

    /**
     * Returns function evaluator to evaluate function at a given point and
     * obtain function Jacobian with respect to each provided parameter.
     *
     * @return function evaluator.
     */
    public LevenbergMarquardtMultiVariateFunctionEvaluator getFunctionEvaluator() {
        return evaluator;
    }

    /**
     * Sets function evaluator to evaluate function at a given point and obtain
     * function Jacobian with respect to each provided parameter.
     *
     * @param evaluator function evaluator.
     * @throws FittingException if evaluation fails.
     */
    public final void setFunctionEvaluator(
            final LevenbergMarquardtMultiVariateFunctionEvaluator evaluator) throws FittingException {
        internalSetFunctionEvaluator(evaluator);
    }

    /**
     * Indicates whether provided instance has enough data to start the function
     * fitting.
     *
     * @return true if this instance is ready to start the function fitting,
     * false otherwise.
     */
    @Override
    public boolean isReady() {
        return evaluator != null && x != null && y != null && x.getRows() == y.getRows()
                && x.getColumns() == evaluator.getNumberOfDimensions()
                && y.getColumns() == evaluator.getNumberOfVariables();
    }

    /**
     * Returns curvature matrix.
     *
     * @return curvature matrix.
     */
    public Matrix getAlpha() {
        return alpha;
    }

    /**
     * Returns degrees of freedom of computed chi square value.
     * Degrees of freedom is equal to the number of sampled data points minus
     * the number of estimated parameters.
     *
     * @return degrees of freedom of computed chi square value.
     */
    public int getChisqDegreesOfFreedom() {
        return ndat - ma;
    }

    /**
     * Gets mean square error produced by estimated parameters with respect to
     * provided sample data.
     *
     * @return mean square error.
     */
    public double getMse() {
        return mse;
    }

    /**
     * Gets the probability of finding a smaller chi square value.
     * The smaller the found chi square value is, the better the fit of the
     * estimated parameters to the actual ones.
     * Thus, the smaller the probability of finding a smaller chi square value,
     * the better the estimated fit is.
     *
     * @return probability of finding a smaller chi square value (better fit), expressed
     * as a value between 0.0 and 1.0.
     * @throws MaxIterationsExceededException if convergence of incomplete
     * gamma function cannot be reached. This is rarely thrown and happens
     * usually for numerically unstable input values.
     */
    public double getP() throws MaxIterationsExceededException {
        return ChiSqDist.cdf(getChisq(), getChisqDegreesOfFreedom());
    }

    /**
     * Gets a measure of quality of estimated fit as a value between 0.0 and 1.0.
     * The larger the quality value is, the better the fit that has been estimated.
     *
     * @return measure of quality of estimated fit.
     * @throws MaxIterationsExceededException if convergence of incomplete
     * gamma function cannot be reached. This is rarely thrown and happens
     * usually for numerically unstable input values.
     */
    public double getQ() throws MaxIterationsExceededException {
        return 1.0 - getP();
    }

    /**
     * Indicates whether covariance must be adjusted or not.
     * When covariance adjustment is enabled, covariance is recomputed taking
     * into account input samples, input standard deviations of the samples and
     * Jacobians of the model function with respect to the estimated parameters
     * using the following expression: Cov = (J'*W*J)^-1, where:
     * Cov is the covariance of estimated parameters,
     * J is a matrix containing the Jacobians of the function with respect to
     * the estimated parameters evaluated at each input point x. Each row of the
     * J matrix contains the model function Jacobian evaluated at the i-th input
     * point x. Each column of the J matrix contains the partial derivatives of
     * the model function with respect to the j-th estimated parameter.
     * W is the inverse of the input variances.
     * It is a diagonal matrix containing the reciprocals of the input variances
     * (squared input standard deviations), that is: W = diag(w), where the k-th
     * element of w is wk = 1 / sigmak^2, sigmak being the standard deviation of
     * the k-th input sample.
     * By default, covariance is adjusted after fitting finishes.
     * <a href="http://people.duke.edu/~hpgavin/ce281/lm.pdf">http://people.duke.edu/~hpgavin/ce281/lm.pdf</a>
     * <a href="https://www8.cs.umu.se/kurser/5DA001/HT07/lectures/lsq-handouts.pdf">https://www8.cs.umu.se/kurser/5DA001/HT07/lectures/lsq-handouts.pdf</a>
     * Numerical Recipes 3rd Ed, page 812
     *
     * @return true if covariance must be adjusted, false otherwise.
     */
    public boolean isCovarianceAdjusted() {
        return adjustCovariance;
    }

    /**
     * Specifies whether covariance must be adjusted or not.
     * When covariance adjustment is enabled, covariance is recomputed taking
     * into account input samples, input standard deviations of the samples and
     * Jacobians of the model function with respect to the estimated parameters
     * using the following expression: Cov = (J'*W*J)^-1, where:
     * Cov is the covariance of estimated parameters,
     * J is a matrix containing the Jacobians of the function with respect to
     * the estimated parameters evaluated at each input point x. Each row of the
     * J matrix contains the model function Jacobian evaluated at the i-th input
     * point x. Each column of the J matrix contains the partial derivatives of
     * the model function with respect to the j-th estimated parameter.
     * W is the inverse of the input variances. It is a diagonal matrix
     * containing the reciprocals of the input variances (squared input standard
     * deviations), that is: W = diag(w), where the k-th element of w is
     * wk = 1 / sigmak^2, sigmak being the standard deviation of the k-th input
     * sample.
     * By default, covariance is adjusted after fitting finishes.
     *
     * @param adjustCovariance true if covariance must be adjusted, false otherwise.
     */
    public void setCovarianceAdjusted(final boolean adjustCovariance) {
        this.adjustCovariance = adjustCovariance;
    }

    /**
     * Fits a function to provided data so that parameters associated to that
     * function can be estimated along with their covariance matrix and chi
     * square value.
     * If the chi square value is close to 1, the fit is usually good.
     * If it is much larger, then the data cannot be properly fitted by the model.
     * If it is close to zero, then the model over-fits the data.
     * Methods {@link #getP()} and {@link #getQ()} can also be used to determine
     * the quality of the fit.
     *
     * @throws FittingException if fitting fails.
     * @throws NotReadyException if enough input data has not yet been provided.
     */
    @Override
    public void fit() throws FittingException, NotReadyException {
        if (!isReady()) {
            throw new NotReadyException();
        }

        try {
            resultAvailable = false;

            int j;
            int k;
            int l;
            int iter;
            int done = 0;
            double alamda = 0.001;
            double ochisq;
            final var atry = new double[ma];
            final var beta = new double[ma];
            final var da = new double[ma];

            // number of parameters to be fitted
            mfit = 0;
            for (j = 0; j < ma; j++) {
                if (ia[j]) {
                    mfit++;
                }
            }

            final var oneda = new Matrix(mfit, 1);
            final var temp = new Matrix(mfit, mfit);

            // initialization
            mrqcof(a, alpha, beta);
            System.arraycopy(a, 0, atry, 0, ma);

            ochisq = chisq;
            for (iter = 0; iter < itmax; iter++) {

                if (done == ndone) {
                    // last pass. Use zero alamda
                    alamda = 0.0;
                }

                for (j = 0; j < mfit; j++) {
                    // alter linearized fitting matrix by augmenting diagonal
                    // elements
                    for (k = 0; k < mfit; k++) {
                        covar.setElementAt(j, k, alpha.getElementAt(j, k));
                    }
                    covar.setElementAt(j, j, alpha.getElementAt(j, j) * (1.0 + alamda));
                    for (k = 0; k < mfit; k++) {
                        temp.setElementAt(j, k, covar.getElementAt(j, k));
                    }
                    oneda.setElementAt(j, 0, beta[j]);
                }

                // matrix solution
                GaussJordanElimination.process(temp, oneda);

                for (j = 0; j < mfit; j++) {
                    for (k = 0; k < mfit; k++) {
                        covar.setElementAt(j, k, temp.getElementAt(j, k));
                    }
                    da[j] = oneda.getElementAt(j, 0);
                }

                if (done == ndone) {
                    // Converged. Clean up and return
                    covsrt(covar);
                    covsrt(alpha);

                    if (adjustCovariance) {
                        adjustCovariance();
                    }

                    resultAvailable = true;

                    return;
                }

                // did the trial succeed?
                for (j = 0, l = 0; l < ma; l++) {
                    if (ia[l]) {
                        atry[l] = a[l] + da[j++];
                    }
                }

                mrqcof(atry, covar, da);
                if (Math.abs(chisq - ochisq) < Math.max(tol, tol * chisq)) {
                    done++;
                }

                if (chisq < ochisq) {
                    // success, accept the new solution
                    alamda *= 0.1;
                    ochisq = chisq;
                    for (j = 0; j < mfit; j++) {
                        for (k = 0; k < mfit; k++) {
                            alpha.setElementAt(j, k, covar.getElementAt(j, k));
                        }
                        beta[j] = da[j];
                    }
                    System.arraycopy(atry, 0, a, 0, ma);
                } else {
                    // failure, increase alamda
                    alamda *= 10.0;
                    chisq = ochisq;
                }
            }

            // too many iterations
            throw new FittingException("too many iterations");

        } catch (final AlgebraException | EvaluationException e) {
            throw new FittingException(e);
        }
    }

    /**
     * Prevents parameter at position i of the array of parameters being
     * estimated from being modified during function fitting.
     *
     * @param i position of parameter to be retained.
     * @param val value to be set for parameter at position i.
     */
    public void hold(final int i, final double val) {
        ia[i] = false;
        a[i] = val;
    }

    /**
     * Releases parameter at position i of the array of parameters being
     * estimated, so it can be modified again if needed.
     *
     * @param i position of parameter to be released.
     */
    public void free(final int i) {
        ia[i] = true;
    }

    /**
     * Adjusts covariance.
     * Covariance must be adjusted to produce more realistic results close to
     * the scale of the problem; otherwise the estimated covariance is just a
     * goodness-of-fit measure similar to the chi square value, because it is
     * the inverse of the curvature matrix, which is only a solution of the
     * covariance up to scale.
     * <p>
     * Covariance is adjusted taking into account input samples, input standard
     * deviations of the samples and Jacobians of the model function with
     * respect to the estimated parameters using the following expression:
     * Cov = (J'*W*J)^-1, where:
     * Cov is the covariance of estimated parameters,
     * J is a matrix containing the Jacobians of the function with respect to
     * the estimated parameters evaluated at each input point x. Each row of the
     * J matrix contains the model function Jacobian evaluated at the i-th input
     * point x. Each column of the J matrix contains the partial derivatives of
     * the model function with respect to the j-th estimated parameter.
     * W is the inverse of the input variances. It is a diagonal matrix
     * containing the reciprocals of the input variances (squared input standard
     * deviations), that is: W = diag(w), where the k-th element of w is
     * wk = 1 / sigmak^2, sigmak being the standard deviation of the k-th input
     * sample.
     *
     * @throws AlgebraException if there are numerical instabilities.
     * @throws EvaluationException if function evaluation fails.
     */
    private void adjustCovariance() throws AlgebraException, EvaluationException {

        final var nVars = evaluator.getNumberOfVariables();
        final var xCols = x.getColumns();
        if (xRow == null) {
            xRow = new double[x.getColumns()];
        }
        if (jacobian == null) {
            jacobian = new Matrix(nVars, ma);
        }

        final var jacobianTrans = new Matrix(ma, nVars);

        final var invCov = new Matrix(a.length, a.length);
        final var tmpInvCov = new Matrix(a.length, a.length);
        final var chiSqrDegreesOfFreedom = getChisqDegreesOfFreedom();
        for (int i = 0; i < ndat; i++) {
            x.getSubmatrixAsArray(i, 0, i, xCols - 1, xRow);

            evaluator.evaluate(i, xRow, ymod, a, jacobian);

            jacobian.transpose(jacobianTrans);

            jacobianTrans.multiply(jacobian, tmpInvCov);

            final var w = 1.0 / ((chiSqrDegreesOfFreedom + 1) * sig[i] * sig[i]);
            tmpInvCov.multiplyByScalar(w);
            invCov.add(tmpInvCov);
        }

        covar = Utils.inverse(invCov);
    }

    /**
     * Internal method to set function evaluator to evaluate function at a given
     * point and obtain function Jacobian with respect to each provided
     * parameter.
     *
     * @param evaluator function evaluator.
     * @throws FittingException if evaluation fails.
     */
    private void internalSetFunctionEvaluator(final LevenbergMarquardtMultiVariateFunctionEvaluator evaluator)
            throws FittingException {

        try {
            this.evaluator = evaluator;

            if (evaluator != null) {
                a = evaluator.createInitialParametersArray();
                ma = a.length;
                covar = new Matrix(ma, ma);
                alpha = new Matrix(ma, ma);
                ia = new boolean[ma];
                Arrays.fill(ia, true);
            }
        } catch (final AlgebraException e) {
            throw new FittingException(e);
        }
    }

    /**
     * Used by fit to evaluate the linearized fitting matrix alpha and vector
     * beta, and to compute chi square.
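     * <p>
     * For the parameters that are free to vary, this accumulates the
     * Gauss-Newton approximation of the normal equations, i.e.
     * alpha = J'*W*J and beta = J'*W*(y - f(x)), where J is the Jacobian of the
     * model function with respect to the estimated parameters, W is the
     * diagonal matrix of reciprocal input variances and (y - f(x)) are the
     * residuals. {@link #fit()} then solves the damped system
     * (alpha + lambda*diag(alpha)) * da = beta to obtain the parameter update da.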
     *
     * @param a estimated parameters so far.
     * @param alpha curvature (i.e. fitting) matrix.
     * @param beta array where derivative increments for each parameter are
     * stored.
     * @throws AlgebraException if there are numerical instabilities.
     * @throws EvaluationException if function evaluation fails.
     */
    private void mrqcof(final double[] a, final Matrix alpha, final double[] beta)
            throws AlgebraException, EvaluationException {

        int i;
        int j;
        int k;
        int l;
        int m;
        double wt;
        double sig2i;
        double dy;
        final var nVars = evaluator.getNumberOfVariables();
        if (jacobian == null) {
            jacobian = new Matrix(nVars, ma);
        }
        if (xRow == null) {
            xRow = new double[x.getColumns()];
        }
        final var xCols = evaluator.getNumberOfDimensions();
        if (ymod == null) {
            ymod = new double[nVars];
        }

        for (j = 0; j < mfit; j++) {
            for (k = 0; k <= j; k++) {
                alpha.setElementAt(j, k, 0.0);
            }
            beta[j] = 0.;
        }

        chisq = 0.0;
        mse = 0.0;
        final var degreesOfFreedom = getChisqDegreesOfFreedom();
        for (i = 0; i < ndat; i++) {
            // summation loop over all data
            x.getSubmatrixAsArray(i, 0, i, xCols - 1, xRow);
            evaluator.evaluate(i, xRow, ymod, a, jacobian);
            sig2i = 1.0 / (sig[i] * sig[i]);
            for (int n = 0; n < nVars; n++) {
                dy = y.getElementAt(i, n) - ymod[n];
                for (j = 0, l = 0; l < ma; l++) {
                    if (ia[l]) {
                        wt = jacobian.getElementAt(n, l) * sig2i;
                        final var alphaBuffer = alpha.getBuffer();
                        for (k = 0, m = 0; m < l + 1; m++) {
                            if (ia[m]) {
                                final var index = alpha.getIndex(j, k++);
                                alphaBuffer[index] += wt * jacobian.getElementAt(n, m);
                            }
                        }
                        beta[j++] += dy * wt;
                    }
                }

                // add to mse
                mse += dy * dy / Math.abs(degreesOfFreedom);

                // and find chi square
                chisq += dy * dy * sig2i / degreesOfFreedom;
            }
        }

        // fill in the symmetric side
        for (j = 1; j < mfit; j++) {
            for (k = 0; k < j; k++) {
                alpha.setElementAt(k, j, alpha.getElementAt(j, k));
            }
        }
    }

    /**
     * Expand in storage the covariance matrix covar, to take into account
     * parameters that are being held fixed. (For the latter, return zero
     * covariances).
     *
     * @param covar covariance matrix.
     */
    private void covsrt(final Matrix covar) {
        int i;
        int j;
        int k;
        for (i = mfit; i < ma; i++) {
            for (j = 0; j < i + 1; j++) {
                covar.setElementAt(i, j, 0.0);
                covar.setElementAt(j, i, 0.0);
            }
        }

        k = mfit - 1;
        for (j = ma - 1; j >= 0; j--) {
            if (ia[j]) {
                final var buffer = covar.getBuffer();
                for (i = 0; i < ma; i++) {
                    final var pos1 = covar.getIndex(i, k);
                    final var pos2 = covar.getIndex(i, j);
                    swap(buffer, buffer, pos1, pos2);
                }
                for (i = 0; i < ma; i++) {
                    final var pos1 = covar.getIndex(k, i);
                    final var pos2 = covar.getIndex(j, i);
                    swap(buffer, buffer, pos1, pos2);
                }

                k--;
            }
        }
    }

    /**
     * Swaps values of arrays at provided positions.
     *
     * @param array1 1st array.
     * @param array2 2nd array.
     * @param pos1 1st position.
     * @param pos2 2nd position.
     */
    private static void swap(final double[] array1, final double[] array2, final int pos1, final int pos2) {
        final var value1 = array1[pos1];
        final var value2 = array2[pos2];
        array1[pos1] = value2;
        array2[pos2] = value1;
    }
}