SymMatrix.cc
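HepSymMatrix stores only the lower triangle of a symmetric matrix in a packed vector of length nrow*(nrow+1)/2, which is why the listing below is full of index arithmetic such as m.begin() + i*(i-1)/2 + j-1. As orientation only, a minimal sketch of that index map (this helper is not part of CLHEP):

// Illustrative sketch, not part of SymMatrix.cc: packed index of the
// element A(i,j) of a HepSymMatrix, 1-based, assuming i >= j (lower triangle).
inline int packedIndex(int i, int j) {
  return i*(i-1)/2 + (j-1);   // e.g. the diagonal (j,j) sits at j*(j-1)/2 + j-1
}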
1 // -*- C++ -*-
2 // ---------------------------------------------------------------------------
3 //
4 // This file is a part of the CLHEP - a Class Library for High Energy Physics.
5 //
6 
7 #ifdef GNUPRAGMA
8 #pragma implementation
9 #endif
10 
11 #include <string.h>
12 #include <float.h> // for DBL_EPSILON
13 
14 #include "CLHEP/Matrix/defs.h"
15 #include "CLHEP/Random/Random.h"
16 #include "CLHEP/Matrix/SymMatrix.h"
17 #include "CLHEP/Matrix/Matrix.h"
18 #include "CLHEP/Matrix/DiagMatrix.h"
19 #include "CLHEP/Matrix/Vector.h"
20 
21 #ifdef HEP_DEBUG_INLINE
22 #include "CLHEP/Matrix/SymMatrix.icc"
23 #endif
24 
25 namespace CLHEP {
26 
27 // Simple operation for all elements
28 
29 #define SIMPLE_UOP(OPER) \
30  HepMatrix::mIter a=m.begin(); \
31  HepMatrix::mIter e=m.begin()+num_size(); \
32  for(;a<e; a++) (*a) OPER t;
33 
34 #define SIMPLE_BOP(OPER) \
35  HepMatrix::mIter a=m.begin(); \
36  HepMatrix::mcIter b=hm2.m.begin(); \
37  HepMatrix::mcIter e=m.begin()+num_size(); \
38  for(;a<e; a++, b++) (*a) OPER (*b);
39 
40 #define SIMPLE_TOP(OPER) \
41  HepMatrix::mcIter a=hm1.m.begin(); \
42  HepMatrix::mcIter b=hm2.m.begin(); \
43  HepMatrix::mIter t=mret.m.begin(); \
44  HepMatrix::mcIter e=hm1.m.begin()+hm1.num_size(); \
45  for( ;a<e; a++, b++, t++) (*t) = (*a) OPER (*b);
46 
47 #define CHK_DIM_2(r1,r2,c1,c2,fun) \
48  if (r1!=r2 || c1!=c2) { \
49  HepGenMatrix::error("Range error in SymMatrix function " #fun "(1)."); \
50  }
51 
52 #define CHK_DIM_1(c1,r2,fun) \
53  if (c1!=r2) { \
54  HepGenMatrix::error("Range error in SymMatrix function " #fun "(2)."); \
55  }
56 
57 // Constructors. (Default constructors are inlined and in .icc file)
58 
59 HepSymMatrix::HepSymMatrix(int p)
60  : m(p*(p+1)/2), nrow(p)
61 {
62  size_ = nrow * (nrow+1) / 2;
63  m.assign(size_,0);
64 }
65 
66 HepSymMatrix::HepSymMatrix(int p, int init)
67  : m(p*(p+1)/2), nrow(p)
68 {
69  size_ = nrow * (nrow+1) / 2;
70 
71  m.assign(size_,0);
72  switch(init)
73  {
74  case 0:
75  break;
76 
77  case 1:
78  {
79  HepMatrix::mIter a;
80  for(int i=0;i<nrow;++i) {
81  a = m.begin() + (i+1)*i/2 + i;
82  *a = 1.0;
83  }
84  break;
85  }
86  default:
87  error("SymMatrix: initialization must be either 0 or 1.");
88  }
89 }
90 
91 HepSymMatrix::HepSymMatrix(int p, HepRandom &r)
92  : m(p*(p+1)/2), nrow(p)
93 {
94  size_ = nrow * (nrow+1) / 2;
95  HepMatrix::mIter a = m.begin();
96  HepMatrix::mIter b = m.begin() + size_;
97  for(;a<b;a++) *a = r();
98 }
99 
100 //
101 // Destructor
102 //
103 HepSymMatrix::~HepSymMatrix() {
104 }
105 
106 HepSymMatrix::HepSymMatrix(const HepSymMatrix &hm1)
107  : HepGenMatrix(hm1), m(hm1.size_), nrow(hm1.nrow), size_(hm1.size_)
108 {
109  m = hm1.m;
110 }
111 
112 HepSymMatrix::HepSymMatrix(const HepDiagMatrix &hm1)
113  : m(hm1.nrow*(hm1.nrow+1)/2), nrow(hm1.nrow)
114 {
115  size_ = nrow * (nrow+1) / 2;
116 
117  int n = num_row();
118  m.assign(size_,0);
119 
120  HepMatrix::mIter mrr = m.begin();
121  HepMatrix::mcIter mr = hm1.m.begin();
122  for(int r=1;r<=n;r++) {
123  *mrr = *(mr++);
124  if(r<n) mrr += (r+1);
125  }
126 }
127 
128 //
129 //
130 // Sub matrix
131 //
132 //
133 
134 HepSymMatrix HepSymMatrix::sub(int min_row, int max_row) const
135 #ifdef HEP_GNU_OPTIMIZED_RETURN
136 return mret(max_row-min_row+1);
137 {
138 #else
139 {
140  HepSymMatrix mret(max_row-min_row+1);
141 #endif
142  if(max_row > num_row())
143  error("HepSymMatrix::sub: Index out of range");
144  HepMatrix::mIter a = mret.m.begin();
145  HepMatrix::mcIter b1 = m.begin() + (min_row+2)*(min_row-1)/2;
146  int rowsize=mret.num_row();
147  for(int irow=1; irow<=rowsize; irow++) {
148  HepMatrix::mcIter b = b1;
149  for(int icol=0; icol<irow; ++icol) {
150  *(a++) = *(b++);
151  }
152  if(irow<rowsize) b1 += irow+min_row-1;
153  }
154  return mret;
155 }
156 
157 HepSymMatrix HepSymMatrix::sub(int min_row, int max_row)
158 {
159  HepSymMatrix mret(max_row-min_row+1);
160  if(max_row > num_row())
161  error("HepSymMatrix::sub: Index out of range");
162  HepMatrix::mIter a = mret.m.begin();
163  HepMatrix::mIter b1 = m.begin() + (min_row+2)*(min_row-1)/2;
164  int rowsize=mret.num_row();
165  for(int irow=1; irow<=rowsize; irow++) {
166  HepMatrix::mIter b = b1;
167  for(int icol=0; icol<irow; ++icol) {
168  *(a++) = *(b++);
169  }
170  if(irow<rowsize) b1 += irow+min_row-1;
171  }
172  return mret;
173 }
174 
175 void HepSymMatrix::sub(int row,const HepSymMatrix &hm1)
176 {
177  if(row <1 || row+hm1.num_row()-1 > num_row() )
178  error("HepSymMatrix::sub: Index out of range");
179  HepMatrix::mcIter a = hm1.m.begin();
180  HepMatrix::mIter b1 = m.begin() + (row+2)*(row-1)/2;
181  int rowsize=hm1.num_row();
182  for(int irow=1; irow<=rowsize; ++irow) {
183  HepMatrix::mIter b = b1;
184  for(int icol=0; icol<irow; ++icol) {
185  *(b++) = *(a++);
186  }
187  if(irow<rowsize) b1 += irow+row-1;
188  }
189 }
190 
191 //
192 // Direct sum of two matrices
193 //
194 
195 HepSymMatrix dsum(const HepSymMatrix &hm1,
196  const HepSymMatrix &hm2)
197 #ifdef HEP_GNU_OPTIMIZED_RETURN
198  return mret(hm1.num_row() + hm2.num_row(), 0);
199 {
200 #else
201 {
202  HepSymMatrix mret(hm1.num_row() + hm2.num_row(),
203  0);
204 #endif
205  mret.sub(1,hm1);
206  mret.sub(hm1.num_row()+1,hm2);
207  return mret;
208 }
209 
210 /* -----------------------------------------------------------------------
211  This section contains support routines for matrix.h: the two-argument
212  functions + and -. They call the copy constructor and then += and -=.
213  ----------------------------------------------------------------------- */
214 HepSymMatrix HepSymMatrix::operator- () const
215 #ifdef HEP_GNU_OPTIMIZED_RETURN
216  return hm2(nrow);
217 {
218 #else
219 {
220  HepSymMatrix hm2(nrow);
221 #endif
222  HepMatrix::mcIter a=m.begin();
223  HepMatrix::mIter b=hm2.m.begin();
224  HepMatrix::mcIter e=m.begin()+num_size();
225  for(;a<e; a++, b++) (*b) = -(*a);
226  return hm2;
227 }
228 
229 
230 
231 HepMatrix operator+(const HepMatrix &hm1,const HepSymMatrix &hm2)
232 #ifdef HEP_GNU_OPTIMIZED_RETURN
233  return mret(hm1);
234 {
235 #else
236 {
237  HepMatrix mret(hm1);
238 #endif
239  CHK_DIM_2(hm1.num_row(),hm2.num_row(), hm1.num_col(),hm2.num_col(),+);
240  mret += hm2;
241  return mret;
242 }
243 HepMatrix operator+(const HepSymMatrix &hm1,const HepMatrix &hm2)
244 #ifdef HEP_GNU_OPTIMIZED_RETURN
245  return mret(hm2);
246 {
247 #else
248 {
249  HepMatrix mret(hm2);
250 #endif
251  CHK_DIM_2(hm1.num_row(),hm2.num_row(),hm1.num_col(),hm2.num_col(),+);
252  mret += hm1;
253  return mret;
254 }
255 
256 HepSymMatrix operator+(const HepSymMatrix &hm1,const HepSymMatrix &hm2)
257 #ifdef HEP_GNU_OPTIMIZED_RETURN
258  return mret(hm1.nrow);
259 {
260 #else
261 {
262  HepSymMatrix mret(hm1.nrow);
263 #endif
264  CHK_DIM_1(hm1.nrow, hm2.nrow,+);
265  SIMPLE_TOP(+)
266  return mret;
267 }
268 
269 //
270 // operator -
271 //
272 
273 HepMatrix operator-(const HepMatrix &hm1,const HepSymMatrix &hm2)
274 #ifdef HEP_GNU_OPTIMIZED_RETURN
275  return mret(hm1);
276 {
277 #else
278 {
279  HepMatrix mret(hm1);
280 #endif
281  CHK_DIM_2(hm1.num_row(),hm2.num_row(),
282  hm1.num_col(),hm2.num_col(),-);
283  mret -= hm2;
284  return mret;
285 }
286 HepMatrix operator-(const HepSymMatrix &hm1,const HepMatrix &hm2)
287 #ifdef HEP_GNU_OPTIMIZED_RETURN
288  return mret(hm1);
289 {
290 #else
291 {
292  HepMatrix mret(hm1);
293 #endif
294  CHK_DIM_2(hm1.num_row(),hm2.num_row(),
295  hm1.num_col(),hm2.num_col(),-);
296  mret -= hm2;
297  return mret;
298 }
299 
300 HepSymMatrix operator-(const HepSymMatrix &hm1,const HepSymMatrix &hm2)
301 #ifdef HEP_GNU_OPTIMIZED_RETURN
302  return mret(hm1.num_row());
303 {
304 #else
305 {
306  HepSymMatrix mret(hm1.num_row());
307 #endif
308  CHK_DIM_1(hm1.num_row(),hm2.num_row(),-);
309  SIMPLE_TOP(-)
310  return mret;
311 }
312 
313 /* -----------------------------------------------------------------------
314  This section contains support routines for matrix.h: the two-argument
315  functions * and /. They call the copy constructor and then /= and *=.
316  ----------------------------------------------------------------------- */
317 
318 HepSymMatrix operator/(
319 const HepSymMatrix &hm1,double t)
320 #ifdef HEP_GNU_OPTIMIZED_RETURN
321  return mret(hm1);
322 {
323 #else
324 {
325  HepSymMatrix mret(hm1);
326 #endif
327  mret /= t;
328  return mret;
329 }
330 
331 HepSymMatrix operator*(const HepSymMatrix &hm1,double t)
332 #ifdef HEP_GNU_OPTIMIZED_RETURN
333  return mret(hm1);
334 {
335 #else
336 {
337  HepSymMatrix mret(hm1);
338 #endif
339  mret *= t;
340  return mret;
341 }
342 
343 HepSymMatrix operator*(double t,const HepSymMatrix &hm1)
344 #ifdef HEP_GNU_OPTIMIZED_RETURN
345  return mret(hm1);
346 {
347 #else
348 {
349  HepSymMatrix mret(hm1);
350 #endif
351  mret *= t;
352  return mret;
353 }
354 
355 
356 HepMatrix operator*(const HepMatrix &hm1,const HepSymMatrix &hm2)
357 #ifdef HEP_GNU_OPTIMIZED_RETURN
358  return mret(hm1.num_row(),hm2.num_col());
359 {
360 #else
361  {
362  HepMatrix mret(hm1.num_row(),hm2.num_col());
363 #endif
364  CHK_DIM_1(hm1.num_col(),hm2.num_row(),*);
365  HepMatrix::mcIter mit1, mit2, sp,snp; //mit2=0
366  double temp;
367  HepMatrix::mIter mir=mret.m.begin();
368  for(mit1=hm1.m.begin();
369  mit1<hm1.m.begin()+hm1.num_row()*hm1.num_col();
370  mit1 = mit2)
371  {
372  snp=hm2.m.begin();
373  for(int step=1;step<=hm2.num_row();++step)
374  {
375  mit2=mit1;
376  sp=snp;
377  snp+=step;
378  temp=0;
379  while(sp<snp)
380  temp+=*(sp++)*(*(mit2++));
381  if( step<hm2.num_row() ) { // only if we aren't on the last row
382  sp+=step-1;
383  for(int stept=step+1;stept<=hm2.num_row();stept++)
384  {
385  temp+=*sp*(*(mit2++));
386  if(stept<hm2.num_row()) sp+=stept;
387  }
388  } // if(step
389  *(mir++)=temp;
390  } // for(step
391  } // for(mit1
392  return mret;
393  }
394 
395 HepMatrix operator*(const HepSymMatrix &hm1,const HepMatrix &hm2)
396 #ifdef HEP_GNU_OPTIMIZED_RETURN
397  return mret(hm1.num_row(),hm2.num_col());
398 {
399 #else
400 {
401  HepMatrix mret(hm1.num_row(),hm2.num_col());
402 #endif
403  CHK_DIM_1(hm1.num_col(),hm2.num_row(),*);
404  int step,stept;
405  HepMatrix::mcIter mit1,mit2,sp,snp;
406  double temp;
407  HepMatrix::mIter mir=mret.m.begin();
408  for(step=1,snp=hm1.m.begin();step<=hm1.num_row();snp+=step++)
409  for(mit1=hm2.m.begin();mit1<hm2.m.begin()+hm2.num_col();mit1++)
410  {
411  mit2=mit1;
412  sp=snp;
413  temp=0;
414  while(sp<snp+step)
415  {
416  temp+=*mit2*(*(sp++));
417  if( hm2.num_size()-(mit2-hm2.m.begin())>hm2.num_col() ){
418  mit2+=hm2.num_col();
419  }
420  }
421  if(step<hm1.num_row()) { // only if we aren't on the last row
422  sp+=step-1;
423  for(stept=step+1;stept<=hm1.num_row();stept++)
424  {
425  temp+=*mit2*(*sp);
426  if(stept<hm1.num_row()) {
427  mit2+=hm2.num_col();
428  sp+=stept;
429  }
430  }
431  } // if(step
432  *(mir++)=temp;
433  } // for(mit1
434  return mret;
435 }
436 
437 HepMatrix operator*(const HepSymMatrix &hm1,const HepSymMatrix &hm2)
438 #ifdef HEP_GNU_OPTIMIZED_RETURN
439  return mret(hm1.num_row(),hm1.num_row());
440 {
441 #else
442 {
443  HepMatrix mret(hm1.num_row(),hm1.num_row());
444 #endif
445  CHK_DIM_1(hm1.num_col(),hm2.num_row(),*);
446  int step1,stept1,step2,stept2;
447  HepMatrix::mcIter snp1,sp1,snp2,sp2;
448  double temp;
449  HepMatrix::mIter mr = mret.m.begin();
450  snp1=hm1.m.begin();
451  for(step1=1;step1<=hm1.num_row();++step1) {
452  snp2=hm2.m.begin();
453  for(step2=1;step2<=hm2.num_row();++step2)
454  {
455  sp1=snp1;
456  sp2=snp2;
457  snp2+=step2;
458  temp=0;
459  if(step1<step2)
460  {
461  while(sp1<snp1+step1) {
462  temp+=(*(sp1++))*(*(sp2++));
463  }
464  sp1+=step1-1;
465  for(stept1=step1+1;stept1!=step2+1;++stept1) {
466  temp+=(*sp1)*(*(sp2++));
467  if(stept1<hm2.num_row()) sp1+=stept1;
468  }
469  if(step2<hm2.num_row()) { // only if we aren't on the last row
470  sp2+=step2-1;
471  for(stept2=step2+1;stept2<=hm2.num_row();stept1++,stept2++) {
472  temp+=(*sp1)*(*sp2);
473  if(stept2<hm2.num_row()) {
474  sp1+=stept1;
475  sp2+=stept2;
476  }
477  } // for(stept2
478  } // if(step2
479  } // step1<step2
480  else
481  {
482  while(sp2<snp2) {
483  temp+=(*(sp1++))*(*(sp2++));
484  }
485  if(step2<hm2.num_row()) { // only if we aren't on the last row
486  sp2+=step2-1;
487  for(stept2=step2+1;stept2!=step1+1;stept2++) {
488  temp+=(*(sp1++))*(*sp2);
489  if(stept2<hm1.num_row()) sp2+=stept2;
490  }
491  if(step1<hm1.num_row()) { // only if we aren't on the last row
492  sp1+=step1-1;
493  for(stept1=step1+1;stept1<=hm1.num_row();stept1++,stept2++) {
494  temp+=(*sp1)*(*sp2);
495  if(stept1<hm1.num_row()) {
496  sp1+=stept1;
497  sp2+=stept2;
498  }
499  } // for(stept1
500  } // if(step1
501  } // if(step2
502  } // else
503  *(mr++)=temp;
504  } // for(step2
505  if(step1<hm1.num_row()) snp1+=step1;
506  } // for(step1
507  return mret;
508 }
509 
510 HepVector operator*(const HepSymMatrix &hm1,const HepVector &hm2)
511 #ifdef HEP_GNU_OPTIMIZED_RETURN
512  return mret(hm1.num_row());
513 {
514 #else
515 {
516  HepVector mret(hm1.num_row());
517 #endif
518  CHK_DIM_1(hm1.num_col(),hm2.num_row(),*);
519  HepMatrix::mcIter sp,snp,vpt;
520  double temp;
521  int step,stept;
522  HepMatrix::mIter vrp=mret.m.begin();
523  for(step=1,snp=hm1.m.begin();step<=hm1.num_row();++step)
524  {
525  sp=snp;
526  vpt=hm2.m.begin();
527  snp+=step;
528  temp=0;
529  while(sp<snp)
530  temp+=*(sp++)*(*(vpt++));
531  if(step<hm1.num_row()) sp+=step-1;
532  for(stept=step+1;stept<=hm1.num_row();stept++)
533  {
534  temp+=*sp*(*(vpt++));
535  if(stept<hm1.num_row()) sp+=stept;
536  }
537  *(vrp++)=temp;
538  } // for(step
539  return mret;
540 }
541 
542 HepSymMatrix vT_times_v(const HepVector &v)
543 #ifdef HEP_GNU_OPTIMIZED_RETURN
544  return mret(v.num_row());
545 {
546 #else
547 {
548  HepSymMatrix mret(v.num_row());
549 #endif
550  HepMatrix::mIter mr=mret.m.begin();
551  HepMatrix::mcIter vt1,vt2;
552  for(vt1=v.m.begin();vt1<v.m.begin()+v.num_row();vt1++)
553  for(vt2=v.m.begin();vt2<=vt1;vt2++)
554  *(mr++)=(*vt1)*(*vt2);
555  return mret;
556 }
557 
558 /* -----------------------------------------------------------------------
559  This section contains the assignment and in-place operators =, +=, -=, *=, /=.
560  ----------------------------------------------------------------------- */
561 
562 HepMatrix & HepMatrix::operator+=(const HepSymMatrix &hm2)
563 {
564  CHK_DIM_2(num_row(),hm2.num_row(),num_col(),hm2.num_col(),+=);
565  HepMatrix::mcIter sjk = hm2.m.begin();
566  // j >= k
567  for(int j=0; j!=nrow; ++j) {
568  for(int k=0; k<=j; ++k) {
569  m[j*ncol+k] += *sjk;
570  // make sure this is not a diagonal element
571  if(k!=j) m[k*nrow+j] += *sjk;
572  ++sjk;
573  }
574  }
575  return (*this);
576 }
577 
578 HepSymMatrix & HepSymMatrix::operator+=(const HepSymMatrix &hm2)
579 {
580  CHK_DIM_2(num_row(),hm2.num_row(),num_col(),hm2.num_col(),+=);
581  SIMPLE_BOP(+=)
582  return (*this);
583 }
584 
585 HepMatrix & HepMatrix::operator-=(const HepSymMatrix &hm2)
586 {
587  CHK_DIM_2(num_row(),hm2.num_row(),num_col(),hm2.num_col(),-=);
588  HepMatrix::mcIter sjk = hm2.m.begin();
589  // j >= k
590  for(int j=0; j!=nrow; ++j) {
591  for(int k=0; k<=j; ++k) {
592  m[j*ncol+k] -= *sjk;
593  // make sure this is not a diagonal element
594  if(k!=j) m[k*nrow+j] -= *sjk;
595  ++sjk;
596  }
597  }
598  return (*this);
599 }
600 
601 HepSymMatrix & HepSymMatrix::operator-=(const HepSymMatrix &hm2)
602 {
603  CHK_DIM_2(num_row(),hm2.num_row(),num_col(),hm2.num_col(),-=);
604  SIMPLE_BOP(-=)
605  return (*this);
606 }
607 
608 HepSymMatrix & HepSymMatrix::operator/=(double t)
609 {
610  SIMPLE_UOP(/=)
611  return (*this);
612 }
613 
614 HepSymMatrix & HepSymMatrix::operator*=(double t)
615 {
616  SIMPLE_UOP(*=)
617  return (*this);
618 }
619 
620 HepMatrix & HepMatrix::operator=(const HepSymMatrix &hm1)
621 {
622  // define size, rows, and columns of *this
623  nrow = ncol = hm1.nrow;
624  if(nrow*ncol != size_)
625  {
626  size_ = nrow*ncol;
627  m.resize(size_);
628  }
629  // begin copy
630  mcIter sjk = hm1.m.begin();
631  // j >= k
632  for(int j=0; j!=nrow; ++j) {
633  for(int k=0; k<=j; ++k) {
634  m[j*ncol+k] = *sjk;
635  // we could copy the diagonal element twice or check
636  // doing the check may be a tiny bit faster,
637  // so we choose that option for now
638  if(k!=j) m[k*nrow+j] = *sjk;
639  ++sjk;
640  }
641  }
642  return (*this);
643 }
644 
645 HepSymMatrix & HepSymMatrix::operator=(const HepSymMatrix &hm1)
646 {
647  if(hm1.nrow != nrow)
648  {
649  nrow = hm1.nrow;
650  size_ = hm1.size_;
651  m.resize(size_);
652  }
653  m = hm1.m;
654  return (*this);
655 }
656 
657 HepSymMatrix & HepSymMatrix::operator=(const HepDiagMatrix &hm1)
658 {
659  if(hm1.nrow != nrow)
660  {
661  nrow = hm1.nrow;
662  size_ = nrow * (nrow+1) / 2;
663  m.resize(size_);
664  }
665 
666  m.assign(size_,0);
667  HepMatrix::mIter mrr = m.begin();
668  HepMatrix::mcIter mr = hm1.m.begin();
669  for(int r=1; r<=nrow; r++) {
670  *mrr = *(mr++);
671  if(r<nrow) mrr += (r+1);
672  }
673  return (*this);
674 }
675 
676 // Print the Matrix.
677 
678 std::ostream& operator<<(std::ostream &os, const HepSymMatrix &q)
679 {
680  os << std::endl;
681 /* Fixed format needs 3 extra characters for field, while scientific needs 7 */
682  int width;
683  if(os.flags() & std::ios::fixed)
684  width = os.precision()+3;
685  else
686  width = os.precision()+7;
687  for(int irow = 1; irow<= q.num_row(); irow++)
688  {
689  for(int icol = 1; icol <= q.num_col(); icol++)
690  {
691  os.width(width);
692  os << q(irow,icol) << " ";
693  }
694  os << std::endl;
695  }
696  return os;
697 }
698 
699 HepSymMatrix HepSymMatrix::
700 apply(double (*f)(double, int, int)) const
701 #ifdef HEP_GNU_OPTIMIZED_RETURN
702 return mret(num_row());
703 {
704 #else
705 {
706  HepSymMatrix mret(num_row());
707 #endif
708  HepMatrix::mcIter a = m.begin();
709  HepMatrix::mIter b = mret.m.begin();
710  for(int ir=1;ir<=num_row();ir++) {
711  for(int ic=1;ic<=ir;ic++) {
712  *(b++) = (*f)(*(a++), ir, ic);
713  }
714  }
715  return mret;
716 }
717 
718 void HepSymMatrix::assign (const HepMatrix &hm1)
719 {
720  if(hm1.nrow != nrow)
721  {
722  nrow = hm1.nrow;
723  size_ = nrow * (nrow+1) / 2;
724  m.resize(size_);
725  }
726  HepMatrix::mcIter a = hm1.m.begin();
727  HepMatrix::mIter b = m.begin();
728  for(int r=1;r<=nrow;r++) {
729  HepMatrix::mcIter d = a;
730  for(int c=1;c<=r;c++) {
731  *(b++) = *(d++);
732  }
733  if(r<nrow) a += nrow;
734  }
735 }
736 
737 HepSymMatrix HepSymMatrix::similarity(const HepMatrix &hm1) const
738 #ifdef HEP_GNU_OPTIMIZED_RETURN
739  return mret(hm1.num_row());
740 {
741 #else
742 {
743  HepSymMatrix mret(hm1.num_row());
744 #endif
745  HepMatrix temp = hm1*(*this);
746 // If hm1*(*this) has correct dimensions, then so will the hm1.T multiplication.
747 // So there is no need to check dimensions again.
748  int n = hm1.num_col();
749  HepMatrix::mIter mr = mret.m.begin();
750  HepMatrix::mIter tempr1 = temp.m.begin();
751  for(int r=1;r<=mret.num_row();r++) {
752  HepMatrix::mcIter hm1c1 = hm1.m.begin();
753  for(int c=1;c<=r;c++) {
754  register double tmp = 0.0;
755  HepMatrix::mIter tempri = tempr1;
756  HepMatrix::mcIter hm1ci = hm1c1;
757  for(int i=1;i<=hm1.num_col();i++) {
758  tmp+=(*(tempri++))*(*(hm1ci++));
759  }
760  *(mr++) = tmp;
761  hm1c1 += n;
762  }
763  tempr1 += n;
764  }
765  return mret;
766 }
767 
768 HepSymMatrix HepSymMatrix::similarity(const HepSymMatrix &hm1) const
769 #ifdef HEP_GNU_OPTIMIZED_RETURN
770  return mret(hm1.num_row());
771 {
772 #else
773 {
774  HepSymMatrix mret(hm1.num_row());
775 #endif
776  HepMatrix temp = hm1*(*this);
777  int n = hm1.num_col();
778  HepMatrix::mIter mr = mret.m.begin();
779  HepMatrix::mIter tempr1 = temp.m.begin();
780  for(int r=1;r<=mret.num_row();r++) {
781  HepMatrix::mcIter hm1c1 = hm1.m.begin();
782  int c;
783  for(c=1;c<=r;c++) {
784  register double tmp = 0.0;
785  HepMatrix::mIter tempri = tempr1;
786  HepMatrix::mcIter hm1ci = hm1c1;
787  int i;
788  for(i=1;i<c;i++) {
789  tmp+=(*(tempri++))*(*(hm1ci++));
790  }
791  for(i=c;i<=hm1.num_col();i++) {
792  tmp+=(*(tempri++))*(*(hm1ci));
793  if(i<hm1.num_col()) hm1ci += i;
794  }
795  *(mr++) = tmp;
796  hm1c1 += c;
797  }
798  tempr1 += n;
799  }
800  return mret;
801 }
802 
803 double HepSymMatrix::similarity(const HepVector &hm1)
804 const {
805  register double mret = 0.0;
806  HepVector temp = (*this) *hm1;
807 // If hm1*(*this) has correct dimensions, then so will the hm1.T multiplication.
808 // So there is no need to check dimensions again.
809  HepMatrix::mIter a=temp.m.begin();
810  HepMatrix::mcIter b=hm1.m.begin();
811  HepMatrix::mIter e=a+hm1.num_row();
812  for(;a<e;) mret += (*(a++)) * (*(b++));
813  return mret;
814 }
815 
816 HepSymMatrix HepSymMatrix::similarityT(const HepMatrix &hm1) const
817 #ifdef HEP_GNU_OPTIMIZED_RETURN
818  return mret(hm1.num_col());
819 {
820 #else
821 {
822  HepSymMatrix mret(hm1.num_col());
823 #endif
824  HepMatrix temp = (*this)*hm1;
825  int n = hm1.num_col();
826  HepMatrix::mIter mrc = mret.m.begin();
827  HepMatrix::mIter temp1r = temp.m.begin();
828  for(int r=1;r<=mret.num_row();r++) {
829  HepMatrix::mcIter m11c = hm1.m.begin();
830  for(int c=1;c<=r;c++) {
831  register double tmp = 0.0;
832  for(int i=1;i<=hm1.num_row();i++) {
833  HepMatrix::mIter tempir = temp1r + n*(i-1);
834  HepMatrix::mcIter hm1ic = m11c + n*(i-1);
835  tmp+=(*(tempir))*(*(hm1ic));
836  }
837  *(mrc++) = tmp;
838  m11c++;
839  }
840  temp1r++;
841  }
842  return mret;
843 }
844 
845 void HepSymMatrix::invert(int &ifail) {
846 
847  ifail = 0;
848 
849  switch(nrow) {
850  case 3:
851  {
852  double det, temp;
853  double t1, t2, t3;
854  double c11,c12,c13,c22,c23,c33;
855  c11 = (*(m.begin()+2)) * (*(m.begin()+5)) - (*(m.begin()+4)) * (*(m.begin()+4));
856  c12 = (*(m.begin()+4)) * (*(m.begin()+3)) - (*(m.begin()+1)) * (*(m.begin()+5));
857  c13 = (*(m.begin()+1)) * (*(m.begin()+4)) - (*(m.begin()+2)) * (*(m.begin()+3));
858  c22 = (*(m.begin()+5)) * (*m.begin()) - (*(m.begin()+3)) * (*(m.begin()+3));
859  c23 = (*(m.begin()+3)) * (*(m.begin()+1)) - (*(m.begin()+4)) * (*m.begin());
860  c33 = (*m.begin()) * (*(m.begin()+2)) - (*(m.begin()+1)) * (*(m.begin()+1));
861  t1 = fabs(*m.begin());
862  t2 = fabs(*(m.begin()+1));
863  t3 = fabs(*(m.begin()+3));
864  if (t1 >= t2) {
865  if (t3 >= t1) {
866  temp = *(m.begin()+3);
867  det = c23*c12-c22*c13;
868  } else {
869  temp = *m.begin();
870  det = c22*c33-c23*c23;
871  }
872  } else if (t3 >= t2) {
873  temp = *(m.begin()+3);
874  det = c23*c12-c22*c13;
875  } else {
876  temp = *(m.begin()+1);
877  det = c13*c23-c12*c33;
878  }
879  if (det==0) {
880  ifail = 1;
881  return;
882  }
883  {
884  double ds = temp/det;
885  HepMatrix::mIter hmm = m.begin();
886  *(hmm++) = ds*c11;
887  *(hmm++) = ds*c12;
888  *(hmm++) = ds*c22;
889  *(hmm++) = ds*c13;
890  *(hmm++) = ds*c23;
891  *(hmm) = ds*c33;
892  }
893  }
894  break;
895  case 2:
896  {
897  double det, temp, ds;
898  det = (*m.begin())*(*(m.begin()+2)) - (*(m.begin()+1))*(*(m.begin()+1));
899  if (det==0) {
900  ifail = 1;
901  return;
902  }
903  ds = 1.0/det;
904  *(m.begin()+1) *= -ds;
905  temp = ds*(*(m.begin()+2));
906  *(m.begin()+2) = ds*(*m.begin());
907  *m.begin() = temp;
908  break;
909  }
910  case 1:
911  {
912  if ((*m.begin())==0) {
913  ifail = 1;
914  return;
915  }
916  *m.begin() = 1.0/(*m.begin());
917  break;
918  }
919  case 5:
920  {
921  invert5(ifail);
922  return;
923  }
924  case 6:
925  {
926  invert6(ifail);
927  return;
928  }
929  case 4:
930  {
931  invert4(ifail);
932  return;
933  }
934  default:
935  {
936  invertBunchKaufman(ifail);
937  return;
938  }
939  }
940  return; // inversion successful
941 }
942 
943 double HepSymMatrix::determinant() const {
944  static const int max_array = 20;
945  // ir must point to an array which is ***1 longer than*** nrow
946  static std::vector<int> ir_vec (max_array+1);
947  if (ir_vec.size() <= static_cast<unsigned int>(nrow)) ir_vec.resize(nrow+1);
948  int * ir = &ir_vec[0];
949 
950  double det;
951  HepMatrix mt(*this);
952  int i = mt.dfact_matrix(det, ir);
953  if(i==0) return det;
954  return 0.0;
955 }
956 
957 double HepSymMatrix::trace() const {
958  double t = 0.0;
959  for (int i=0; i<nrow; i++)
960  t += *(m.begin() + (i+3)*i/2);
961  return t;
962 }
963 
964 void HepSymMatrix::invertBunchKaufman(int &ifail) {
965  // Bunch-Kaufman diagonal pivoting method
966  // It is described in J.R. Bunch, L. Kaufman (1977),
967  // "Some Stable Methods for Calculating Inertia and Solving Symmetric
968  // Linear Systems", Math. Comp. 31, pp. 162-179, or in Gene H. Golub,
969  // Charles F. van Loan, "Matrix Computations" (the second edition
970  // has a bug), and is implemented in LAPACK.
971  // Mario Stanke, 09/97
972 
973  int i, j, k, is;
974  int pivrow;
975 
976  // Establish the two working-space arrays needed: x and piv are
977  // used as pointers to arrays of doubles and ints respectively, each
978  // of length nrow. We do not want to reallocate each time through
979  // unless the size needs to grow. We do not want to leak memory, even
980  // by having a new without a delete that is only done once.
981 
982  static const int max_array = 25;
983 #ifdef DISABLE_ALLOC
984  static std::vector<double> xvec (max_array);
985  static std::vector<int> pivv (max_array);
986  typedef std::vector<int>::iterator pivIter;
987 #else
988  static std::vector<double,Alloc<double,25> > xvec (max_array);
989  static std::vector<int, Alloc<int, 25> > pivv (max_array);
990  typedef std::vector<int,Alloc<int,25> >::iterator pivIter;
991 #endif
992  if (xvec.size() < static_cast<unsigned int>(nrow)) xvec.resize(nrow);
993  if (pivv.size() < static_cast<unsigned int>(nrow)) pivv.resize(nrow);
994  // Note - resize should do nothing if the size is already larger than nrow,
995  // but on VC++ there are indications that it does, so we check.
996  // Note - the data elements in a vector are guaranteed to be contiguous,
997  // so x[i] and piv[i] are optimally fast.
998  mIter x = xvec.begin();
999  // x[i] is used as helper storage, needs to have at least size nrow.
1000  pivIter piv = pivv.begin();
1001  // piv[i] is used to store details of exchanges
1002 
1003  double temp1, temp2;
1004  HepMatrix::mIter ip, mjj, iq;
1005  double lambda, sigma;
1006  const double alpha = .6404; // = (1+sqrt(17))/8
1007  const double epsilon = 32*DBL_EPSILON;
1008  // whenever a sum of two doubles is below or equal to epsilon
1009  // it is set to zero.
1010  // this constant could be set to zero but then the algorithm
1011  // doesn't necessarily detect that a matrix is singular
1012 
1013  for (i = 0; i < nrow; ++i) piv[i] = i+1;
1014 
1015  ifail = 0;
1016 
1017  // compute the factorization P*A*P^T = L * D * L^T
1018  // L is unit lower triangular, D is direct sum of 1x1 and 2x2 matrices
1019  // L and D^-1 are stored in A = *this, P is stored in piv[]
1020 
1021  for (j=1; j < nrow; j+=is) // main loop over columns
1022  {
1023  mjj = m.begin() + j*(j-1)/2 + j-1;
1024  lambda = 0; // compute lambda = max of A(j+1:n,j)
1025  pivrow = j+1;
1026  //ip = m.begin() + (j+1)*j/2 + j-1;
1027  for (i=j+1; i <= nrow ; ++i) {
1028  // calculate ip to avoid going off end of storage array
1029  ip = m.begin() + (i-1)*i/2 + j-1;
1030  if (fabs(*ip) > lambda) {
1031  lambda = fabs(*ip);
1032  pivrow = i;
1033  }
1034  } // for i
1035  if (lambda == 0 ) {
1036  if (*mjj == 0) {
1037  ifail = 1;
1038  return;
1039  }
1040  is=1;
1041  *mjj = 1./ *mjj;
1042  } else { // lambda == 0
1043  if (fabs(*mjj) >= lambda*alpha) {
1044  is=1;
1045  pivrow=j;
1046  } else { // fabs(*mjj) >= lambda*alpha
1047  sigma = 0; // compute sigma = max A(pivrow, j:pivrow-1)
1048  ip = m.begin() + pivrow*(pivrow-1)/2+j-1;
1049  for (k=j; k < pivrow; k++) {
1050  if (fabs(*ip) > sigma) sigma = fabs(*ip);
1051  ip++;
1052  } // for k
1053  if (sigma * fabs(*mjj) >= alpha * lambda * lambda) {
1054  is=1;
1055  pivrow = j;
1056  } else if (fabs(*(m.begin()+pivrow*(pivrow-1)/2+pivrow-1))
1057  >= alpha * sigma) {
1058  is=1;
1059  } else {
1060  is=2;
1061  } // if sigma...
1062  } // fabs(*mjj) >= lambda*alpha
1063  if (pivrow == j) { // no permutation necessary
1064  piv[j-1] = pivrow;
1065  if (*mjj == 0) {
1066  ifail=1;
1067  return;
1068  }
1069  temp2 = *mjj = 1./ *mjj; // invert D(j,j)
1070 
1071  // update A(j+1:n, j+1,n)
1072  for (i=j+1; i <= nrow; i++) {
1073  temp1 = *(m.begin() + i*(i-1)/2 + j-1) * temp2;
1074  ip = m.begin()+i*(i-1)/2+j;
1075  for (k=j+1; k<=i; k++) {
1076  *ip -= temp1 * *(m.begin() + k*(k-1)/2 + j-1);
1077  if (fabs(*ip) <= epsilon)
1078  *ip=0;
1079  ip++;
1080  }
1081  } // for i
1082  // update L
1083  //ip = m.begin() + (j+1)*j/2 + j-1;
1084  for (i=j+1; i <= nrow; ++i) {
1085  // calculate ip to avoid going off end of storage array
1086  ip = m.begin() + (i-1)*i/2 + j-1;
1087  *ip *= temp2;
1088  }
1089  } else if (is==1) { // 1x1 pivot
1090  piv[j-1] = pivrow;
1091 
1092  // interchange rows and columns j and pivrow in
1093  // submatrix (j:n,j:n)
1094  ip = m.begin() + pivrow*(pivrow-1)/2 + j;
1095  for (i=j+1; i < pivrow; i++, ip++) {
1096  temp1 = *(m.begin() + i*(i-1)/2 + j-1);
1097  *(m.begin() + i*(i-1)/2 + j-1)= *ip;
1098  *ip = temp1;
1099  } // for i
1100  temp1 = *mjj;
1101  *mjj = *(m.begin()+pivrow*(pivrow-1)/2+pivrow-1);
1102  *(m.begin()+pivrow*(pivrow-1)/2+pivrow-1) = temp1;
1103  ip = m.begin() + (pivrow+1)*pivrow/2 + j-1;
1104  iq = ip + pivrow-j;
1105  for (i = pivrow+1; i <= nrow; ip += i, iq += i++) {
1106  temp1 = *iq;
1107  *iq = *ip;
1108  *ip = temp1;
1109  } // for i
1110 
1111  if (*mjj == 0) {
1112  ifail = 1;
1113  return;
1114  } // *mjj == 0
1115  temp2 = *mjj = 1./ *mjj; // invert D(j,j)
1116 
1117  // update A(j+1:n, j+1:n)
1118  for (i = j+1; i <= nrow; i++) {
1119  temp1 = *(m.begin() + i*(i-1)/2 + j-1) * temp2;
1120  ip = m.begin()+i*(i-1)/2+j;
1121  for (k=j+1; k<=i; k++) {
1122  *ip -= temp1 * *(m.begin() + k*(k-1)/2 + j-1);
1123  if (fabs(*ip) <= epsilon)
1124  *ip=0;
1125  ip++;
1126  } // for k
1127  } // for i
1128  // update L
1129  //ip = m.begin() + (j+1)*j/2 + j-1;
1130  for (i=j+1; i <= nrow; ++i) {
1131  // calculate ip to avoid going off end of storage array
1132  ip = m.begin() + (i-1)*i/2 + j-1;
1133  *ip *= temp2;
1134  }
1135  } else { // is=2, ie use a 2x2 pivot
1136  piv[j-1] = -pivrow;
1137  piv[j] = 0; // that means this is the second row of a 2x2 pivot
1138 
1139  if (j+1 != pivrow) {
1140  // interchange rows and columns j+1 and pivrow in
1141  // submatrix (j:n,j:n)
1142  ip = m.begin() + pivrow*(pivrow-1)/2 + j+1;
1143  for (i=j+2; i < pivrow; i++, ip++) {
1144  temp1 = *(m.begin() + i*(i-1)/2 + j);
1145  *(m.begin() + i*(i-1)/2 + j) = *ip;
1146  *ip = temp1;
1147  } // for i
1148  temp1 = *(mjj + j + 1);
1149  *(mjj + j + 1) =
1150  *(m.begin() + pivrow*(pivrow-1)/2 + pivrow-1);
1151  *(m.begin() + pivrow*(pivrow-1)/2 + pivrow-1) = temp1;
1152  temp1 = *(mjj + j);
1153  *(mjj + j) = *(m.begin() + pivrow*(pivrow-1)/2 + j-1);
1154  *(m.begin() + pivrow*(pivrow-1)/2 + j-1) = temp1;
1155  ip = m.begin() + (pivrow+1)*pivrow/2 + j;
1156  iq = ip + pivrow-(j+1);
1157  for (i = pivrow+1; i <= nrow; ip += i, iq += i++) {
1158  temp1 = *iq;
1159  *iq = *ip;
1160  *ip = temp1;
1161  } // for i
1162  } // j+1 != pivrow
1163  // invert D(j:j+1,j:j+1)
1164  temp2 = *mjj * *(mjj + j + 1) - *(mjj + j) * *(mjj + j);
1165  if (temp2 == 0) {
1166  std::cerr
1167  << "SymMatrix::bunch_invert: error in pivot choice"
1168  << std::endl;
1169  }
1170  temp2 = 1. / temp2;
1171  // this quotient is guaranteed to exist by the choice
1172  // of the pivot
1173  temp1 = *mjj;
1174  *mjj = *(mjj + j + 1) * temp2;
1175  *(mjj + j + 1) = temp1 * temp2;
1176  *(mjj + j) = - *(mjj + j) * temp2;
1177 
1178  if (j < nrow-1) { // otherwise do nothing
1179  // update A(j+2:n, j+2:n)
1180  for (i=j+2; i <= nrow ; i++) {
1181  ip = m.begin() + i*(i-1)/2 + j-1;
1182  temp1 = *ip * *mjj + *(ip + 1) * *(mjj + j);
1183  if (fabs(temp1 ) <= epsilon) temp1 = 0;
1184  temp2 = *ip * *(mjj + j) + *(ip + 1) * *(mjj + j + 1);
1185  if (fabs(temp2 ) <= epsilon) temp2 = 0;
1186  for (k = j+2; k <= i ; k++) {
1187  ip = m.begin() + i*(i-1)/2 + k-1;
1188  iq = m.begin() + k*(k-1)/2 + j-1;
1189  *ip -= temp1 * *iq + temp2 * *(iq+1);
1190  if (fabs(*ip) <= epsilon)
1191  *ip = 0;
1192  } // for k
1193  } // for i
1194  // update L
1195  for (i=j+2; i <= nrow ; i++) {
1196  ip = m.begin() + i*(i-1)/2 + j-1;
1197  temp1 = *ip * *mjj + *(ip+1) * *(mjj + j);
1198  if (fabs(temp1) <= epsilon) temp1 = 0;
1199  *(ip+1) = *ip * *(mjj + j)
1200  + *(ip+1) * *(mjj + j + 1);
1201  if (fabs(*(ip+1)) <= epsilon) *(ip+1) = 0;
1202  *ip = temp1;
1203  } // for i
1204  } // j < nrow-1
1205  }
1206  }
1207  } // end of main loop over columns
1208 
1209  if (j == nrow) { // then the last pivot is 1x1
1210  mjj = m.begin() + j*(j-1)/2 + j-1;
1211  if (*mjj == 0) {
1212  ifail = 1;
1213  return;
1214  } else { *mjj = 1. / *mjj; }
1215  } // end of last pivot code
1216 
1217  // computing the inverse from the factorization
1218 
1219  for (j = nrow ; j >= 1 ; j -= is) // loop over columns
1220  {
1221  mjj = m.begin() + j*(j-1)/2 + j-1;
1222  if (piv[j-1] > 0) { // 1x1 pivot, compute column j of inverse
1223  is = 1;
1224  if (j < nrow) {
1225  //ip = m.begin() + (j+1)*j/2 + j-1;
1226  //for (i=0; i < nrow-j; ip += 1+j+i++) x[i] = *ip;
1227  ip = m.begin() + (j+1)*j/2 - 1;
1228  for (i=0; i < nrow-j; ++i) {
1229  ip += i + j;
1230  x[i] = *ip;
1231  }
1232  for (i=j+1; i<=nrow ; i++) {
1233  temp2=0;
1234  ip = m.begin() + i*(i-1)/2 + j;
1235  for (k=0; k <= i-j-1; k++) temp2 += *ip++ * x[k];
1236  // avoid setting ip outside the bounds of the storage array
1237  ip -= 1;
1238  // using the value of k from the previous loop
1239  for ( ; k < nrow-j; ++k) {
1240  ip += j+k;
1241  temp2 += *ip * x[k];
1242  }
1243  *(m.begin()+ i*(i-1)/2 + j-1) = -temp2;
1244  } // for i
1245  temp2 = 0;
1246  //ip = m.begin() + (j+1)*j/2 + j-1;
1247  //for (k=0; k < nrow-j; ip += 1+j+k++)
1248  //temp2 += x[k] * *ip;
1249  ip = m.begin() + (j+1)*j/2 - 1;
1250  for (k=0; k < nrow-j; ++k) {
1251  ip += j+k;
1252  temp2 += x[k] * *ip;
1253  }
1254  *mjj -= temp2;
1255  } // j < nrow
1256  } else { //2x2 pivot, compute columns j and j-1 of the inverse
1257  if (piv[j-1] != 0)
1258  std::cerr << "error in piv" << piv[j-1] << std::endl;
1259  is=2;
1260  if (j < nrow) {
1261  //ip = m.begin() + (j+1)*j/2 + j-1;
1262  //for (i=0; i < nrow-j; ip += 1+j+i++) x[i] = *ip;
1263  ip = m.begin() + (j+1)*j/2 - 1;
1264  for (i=0; i < nrow-j; ++i) {
1265  ip += i + j;
1266  x[i] = *ip;
1267  }
1268  for (i=j+1; i<=nrow ; i++) {
1269  temp2 = 0;
1270  ip = m.begin() + i*(i-1)/2 + j;
1271  for (k=0; k <= i-j-1; k++)
1272  temp2 += *ip++ * x[k];
1273  for (ip += i-1; k < nrow-j; ip += 1+j+k++)
1274  temp2 += *ip * x[k];
1275  *(m.begin()+ i*(i-1)/2 + j-1) = -temp2;
1276  } // for i
1277  temp2 = 0;
1278  //ip = m.begin() + (j+1)*j/2 + j-1;
1279  //for (k=0; k < nrow-j; ip += 1+j+k++) temp2 += x[k] * *ip;
1280  ip = m.begin() + (j+1)*j/2 - 1;
1281  for (k=0; k < nrow-j; ++k) {
1282  ip += k + j;
1283  temp2 += x[k] * *ip;
1284  }
1285  *mjj -= temp2;
1286  temp2 = 0;
1287  //ip = m.begin() + (j+1)*j/2 + j-2;
1288  //for (i=j+1; i <= nrow; ip += i++) temp2 += *ip * *(ip+1);
1289  ip = m.begin() + (j+1)*j/2 - 2;
1290  for (i=j+1; i <= nrow; ++i) {
1291  ip += i - 1;
1292  temp2 += *ip * *(ip+1);
1293  }
1294  *(mjj-1) -= temp2;
1295  //ip = m.begin() + (j+1)*j/2 + j-2;
1296  //for (i=0; i < nrow-j; ip += 1+j+i++) x[i] = *ip;
1297  ip = m.begin() + (j+1)*j/2 - 2;
1298  for (i=0; i < nrow-j; ++i) {
1299  ip += i + j;
1300  x[i] = *ip;
1301  }
1302  for (i=j+1; i <= nrow ; i++) {
1303  temp2 = 0;
1304  ip = m.begin() + i*(i-1)/2 + j;
1305  for (k=0; k <= i-j-1; k++)
1306  temp2 += *ip++ * x[k];
1307  for (ip += i-1; k < nrow-j; ip += 1+j+k++)
1308  temp2 += *ip * x[k];
1309  *(m.begin()+ i*(i-1)/2 + j-2)= -temp2;
1310  } // for i
1311  temp2 = 0;
1312  //ip = m.begin() + (j+1)*j/2 + j-2;
1313  //for (k=0; k < nrow-j; ip += 1+j+k++)
1314  // temp2 += x[k] * *ip;
1315  ip = m.begin() + (j+1)*j/2 - 2;
1316  for (k=0; k < nrow-j; ++k) {
1317  ip += k + j;
1318  temp2 += x[k] * *ip;
1319  }
1320  *(mjj-j) -= temp2;
1321  } // j < nrow
1322  } // else piv[j-1] > 0
1323 
1324  // interchange rows and columns j and piv[j-1]
1325  // or rows and columns j and -piv[j-2]
1326 
1327  pivrow = (piv[j-1]==0)? -piv[j-2] : piv[j-1];
1328  ip = m.begin() + pivrow*(pivrow-1)/2 + j;
1329  for (i=j+1;i < pivrow; i++, ip++) {
1330  temp1 = *(m.begin() + i*(i-1)/2 + j-1);
1331  *(m.begin() + i*(i-1)/2 + j-1) = *ip;
1332  *ip = temp1;
1333  } // for i
1334  temp1 = *mjj;
1335  *mjj = *(m.begin() + pivrow*(pivrow-1)/2 + pivrow-1);
1336  *(m.begin() + pivrow*(pivrow-1)/2 + pivrow-1) = temp1;
1337  if (is==2) {
1338  temp1 = *(mjj-1);
1339  *(mjj-1) = *( m.begin() + pivrow*(pivrow-1)/2 + j-2);
1340  *( m.begin() + pivrow*(pivrow-1)/2 + j-2) = temp1;
1341  } // is==2
1342 
1343  // problem right here
1344  if( pivrow < nrow ) {
1345  ip = m.begin() + (pivrow+1)*pivrow/2 + j-1; // &A(i,j)
1346  // adding parenthesis for VC++
1347  iq = ip + (pivrow-j);
1348  for (i = pivrow+1; i <= nrow; i++) {
1349  temp1 = *iq;
1350  *iq = *ip;
1351  *ip = temp1;
1352  if( i < nrow ) {
1353  ip += i;
1354  iq += i;
1355  }
1356  } // for i
1357  } // pivrow < nrow
1358  } // end of loop over columns (in computing inverse from factorization)
1359 
1360  return; // inversion successful
1361 
1362 }
1363 
1364 } // namespace CLHEP