/* Basic integer/scalar typedefs from the f2c/OpenBLAS compatibility header.
   NOTE(review): the #if/#else lines selecting between the 64-bit-interface
   and default branches were lost in extraction, which is why conflicting
   typedefs of BLASLONG/BLASULONG appear side by side below. */
14 typedef long long BLASLONG;
15 typedef unsigned long long BLASULONG;
17 typedef long BLASLONG;
18 typedef unsigned long BLASULONG;
/* blasint is the integer width used across the BLAS/LAPACK interface. */
22 typedef BLASLONG blasint;
/* blasabs() is chosen to match the width of blasint in each branch. */
24 #define blasabs(x) llabs(x)
26 #define blasabs(x) labs(x)
30 #define blasabs(x) abs(x)
/* f2c-style scalar type aliases used by the translated Fortran below. */
33 typedef blasint integer;
35 typedef unsigned int uinteger;
36 typedef char *address;
37 typedef short int shortint;
39 typedef double doublereal;
/* f2c complex types: plain structs holding real and imaginary parts. */
40 typedef struct { real r, i; } complex;
41 typedef struct { doublereal r, i; } doublecomplex;
/* Accessors converting between f2c complex structs and native complex values.
   NOTE(review): the #ifdef separating the MSVC (_Fcomplex/_Dcomplex) and C99
   (_Complex) variants was lost in extraction, which is why both definitions
   of Cf/Cd/_pCf/_pCd appear below. */
43 static inline _Fcomplex Cf(complex *z) {_Fcomplex zz={z->r , z->i}; return zz;}
44 static inline _Dcomplex Cd(doublecomplex *z) {_Dcomplex zz={z->r , z->i};return zz;}
/* Pointer reinterpretation: relies on the f2c structs and the native complex
   types sharing the same {re, im} memory layout. */
45 static inline _Fcomplex * _pCf(complex *z) {return (_Fcomplex*)z;}
46 static inline _Dcomplex * _pCd(doublecomplex *z) {return (_Dcomplex*)z;}
48 static inline _Complex float Cf(complex *z) {return z->r + z->i*_Complex_I;}
49 static inline _Complex double Cd(doublecomplex *z) {return z->r + z->i*_Complex_I;}
50 static inline _Complex float * _pCf(complex *z) {return (_Complex float*)z;}
51 static inline _Complex double * _pCd(doublecomplex *z) {return (_Complex double*)z;}
/* Writable lvalue views of an f2c complex object (used as pCf(z) = value). */
53 #define pCf(z) (*_pCf(z))
54 #define pCd(z) (*_pCd(z))
/* Remaining f2c.h type aliases and I/O-descriptor remnants.
   NOTE(review): the surrounding struct bodies (cilist, icilist, alist, etc.)
   were lost in extraction; only stray member/comment lines survive below. */
56 typedef short int shortlogical;
57 typedef char logical1;
58 typedef char integer1;
63 /* Extern is for use with -E */
74 /*external read, write*/
83 /*internal read, write*/
113 /*rewind, backspace, endfile*/
125 ftnint *inex; /*parameters in standard's order*/
151 union Multitype { /* for multiple entry points */
162 typedef union Multitype Multitype;
164 struct Vardesc { /* for Namelist */
170 typedef struct Vardesc Vardesc;
177 typedef struct Namelist Namelist;
/* f2c runtime macro layer: C implementations of the Fortran intrinsics that
   f2c-generated code calls.  NOTE(review): several #if/#else lines were lost
   in extraction, so alternative definitions of the same macro (c_div/z_div)
   appear side by side below. */
179 #define abs(x) ((x) >= 0 ? (x) : -(x))
180 #define dabs(x) (fabs(x))
/* Classic f2c min/max: arguments are evaluated twice — never pass
   expressions with side effects. */
181 #define f2cmin(a,b) ((a) <= (b) ? (a) : (b))
182 #define f2cmax(a,b) ((a) >= (b) ? (a) : (b))
183 #define dmin(a,b) (f2cmin(a,b))
184 #define dmax(a,b) (f2cmax(a,b))
185 #define bit_test(a,b) ((a) >> (b) & 1)
186 #define bit_clear(a,b) ((a) & ~((uinteger)1 << (b)))
187 #define bit_set(a,b) ((a) | ((uinteger)1 << (b)))
189 #define abort_() { sig_die("Fortran abort routine called", 1); }
/* Single-precision complex intrinsics (operands passed by pointer). */
190 #define c_abs(z) (cabsf(Cf(z)))
/* NOTE(review): c_cos uses double-precision ccos on a float complex value,
   unlike the other c_* macros which use the f-suffixed functions — confirm
   against upstream whether this is intentional. */
191 #define c_cos(R,Z) { pCf(R)=ccos(Cf(Z)); }
/* NOTE(review): the two definitions below (MSVC branch) divide real and
   imaginary parts componentwise, which is not complex division, and they
   assign into the by-value temporary returned by Cf/Cd, so the store has no
   effect.  The pCf/pCd-based definitions that follow look correct.  Likely
   an upstream bug in this branch — TODO confirm. */
193 #define c_div(c, a, b) {Cf(c)._Val[0] = (Cf(a)._Val[0]/Cf(b)._Val[0]); Cf(c)._Val[1]=(Cf(a)._Val[1]/Cf(b)._Val[1]);}
194 #define z_div(c, a, b) {Cd(c)._Val[0] = (Cd(a)._Val[0]/Cd(b)._Val[0]); Cd(c)._Val[1]=(Cd(a)._Val[1]/Cd(b)._Val[1]);}
196 #define c_div(c, a, b) {pCf(c) = Cf(a)/Cf(b);}
197 #define z_div(c, a, b) {pCd(c) = Cd(a)/Cd(b);}
199 #define c_exp(R, Z) {pCf(R) = cexpf(Cf(Z));}
200 #define c_log(R, Z) {pCf(R) = clogf(Cf(Z));}
201 #define c_sin(R, Z) {pCf(R) = csinf(Cf(Z));}
202 //#define c_sqrt(R, Z) {*(R) = csqrtf(Cf(Z));}
203 #define c_sqrt(R, Z) {pCf(R) = csqrtf(Cf(Z));}
/* Double-precision real intrinsics: each takes a pointer to the operand,
   mirroring Fortran's pass-by-reference convention. */
204 #define d_abs(x) (fabs(*(x)))
205 #define d_acos(x) (acos(*(x)))
206 #define d_asin(x) (asin(*(x)))
207 #define d_atan(x) (atan(*(x)))
208 #define d_atn2(x, y) (atan2(*(x),*(y)))
209 #define d_cnjg(R, Z) { pCd(R) = conj(Cd(Z)); }
210 #define r_cnjg(R, Z) { pCf(R) = conjf(Cf(Z)); }
211 #define d_cos(x) (cos(*(x)))
212 #define d_cosh(x) (cosh(*(x)))
213 #define d_dim(__a, __b) ( *(__a) > *(__b) ? *(__a) - *(__b) : 0.0 )
214 #define d_exp(x) (exp(*(x)))
215 #define d_imag(z) (cimag(Cd(z)))
216 #define r_imag(z) (cimagf(Cf(z)))
217 #define d_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
218 #define r_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
/* 0.43429448190325182765 = log10(e); implements Fortran LOG10 via ln. */
219 #define d_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
220 #define r_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
221 #define d_log(x) (log(*(x)))
222 #define d_mod(x, y) (fmod(*(x), *(y)))
/* Round-half-away-from-zero, as required by Fortran NINT. */
223 #define u_nint(__x) ((__x)>=0 ? floor((__x) + .5) : -floor(.5 - (__x)))
224 #define d_nint(x) u_nint(*(x))
/* Fortran SIGN(a,b): |a| with the sign of b. */
225 #define u_sign(__a,__b) ((__b) >= 0 ? ((__a) >= 0 ? (__a) : -(__a)) : -((__a) >= 0 ? (__a) : -(__a)))
226 #define d_sign(a,b) u_sign(*(a),*(b))
227 #define r_sign(a,b) u_sign(*(a),*(b))
228 #define d_sin(x) (sin(*(x)))
229 #define d_sinh(x) (sinh(*(x)))
230 #define d_sqrt(x) (sqrt(*(x)))
231 #define d_tan(x) (tan(*(x)))
232 #define d_tanh(x) (tanh(*(x)))
233 #define i_abs(x) abs(*(x))
234 #define i_dnnt(x) ((integer)u_nint(*(x)))
235 #define i_len(s, n) (n)
236 #define i_nint(x) ((integer)u_nint(*(x)))
237 #define i_sign(a,b) ((integer)u_sign((integer)*(a),(integer)*(b)))
/* Power intrinsics; the *pow_ui helpers are defined later in this header. */
238 #define pow_dd(ap, bp) ( pow(*(ap), *(bp)))
239 #define pow_si(B,E) spow_ui(*(B),*(E))
240 #define pow_ri(B,E) spow_ui(*(B),*(E))
241 #define pow_di(B,E) dpow_ui(*(B),*(E))
242 #define pow_zi(p, a, b) {pCd(p) = zpow_ui(Cd(a), *(b));}
243 #define pow_ci(p, a, b) {pCf(p) = cpow_ui(Cf(a), *(b));}
244 #define pow_zz(R,A,B) {pCd(R) = cpow(Cd(A),*(B));}
/* Fortran character intrinsics: concatenation with blank padding,
   bounded compare, and bounded copy. */
245 #define s_cat(lpp, rpp, rnp, np, llp) { ftnlen i, nc, ll; char *f__rp, *lp; ll = (llp); lp = (lpp); for(i=0; i < (int)*(np); ++i) { nc = ll; if((rnp)[i] < nc) nc = (rnp)[i]; ll -= nc; f__rp = (rpp)[i]; while(--nc >= 0) *lp++ = *(f__rp)++; } while(--ll >= 0) *lp++ = ' '; }
246 #define s_cmp(a,b,c,d) ((integer)strncmp((a),(b),f2cmin((c),(d))))
247 #define s_copy(A,B,C,D) { int __i,__m; for (__i=0, __m=f2cmin((C),(D)); __i<__m && (B)[__i] != 0; ++__i) (A)[__i] = (B)[__i]; }
/* Error-stop shims: both ignore their arguments and terminate the process. */
248 #define sig_die(s, kill) { exit(1); }
249 #define s_stop(s, n) {exit(0);}
250 static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n";
251 #define z_abs(z) (cabs(Cd(z)))
252 #define z_exp(R, Z) {pCd(R) = cexp(Cd(Z));}
253 #define z_sqrt(R, Z) {pCd(R) = csqrt(Cd(Z));}
/* Loop-control shims: Fortran EXIT/CYCLE map onto the enclosing C loop. */
254 #define myexit_() break;
255 #define mycycle_() continue;
/* NOTE(review): the two macros below expand to a brace-wrapped expression,
   usable only where a statement-expression-like form is expected by the
   generated code. */
256 #define myceiling_(w) {ceil(w)}
257 #define myhuge_(w) {HUGE_VAL}
258 //#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);}
259 #define mymaxloc_(w,s,e,n) {dmaxloc_(w,*(s),*(e),n)}
261 /* procedure parameter types for -A and -C++ */
263 #define F2C_proc_par_types 1
/* NOTE(review): variadic (C++) vs. unprototyped (K&R C) function-pointer
   variants; the #ifdef separating them was lost in extraction. */
265 typedef logical (*L_fp)(...);
267 typedef logical (*L_fp)();
/* spow_ui: float raised to an integer power by binary exponentiation.
   A negative exponent is mapped to (1/x) ** (-n).
   NOTE(review): the squaring loop and return statement were lost in
   extraction; only the header of the function survives here. */
270 static float spow_ui(float x, integer n) {
271 float pow=1.0; unsigned long int u;
273 if(n < 0) n = -n, x = 1/x;
/* dpow_ui: double raised to an integer power by binary exponentiation;
   the double-precision twin of spow_ui above.
   NOTE(review): the squaring loop and return were lost in extraction. */
282 static double dpow_ui(double x, integer n) {
283 double pow=1.0; unsigned long int u;
285 if(n < 0) n = -n, x = 1/x;
/* cpow_ui (MSVC _Fcomplex branch): complex float ** integer via binary
   exponentiation.  NOTE(review): the componentwise reciprocal and products
   below (x.r = 1/x.r, pow.r *= x.r, ...) drop the cross terms of complex
   arithmetic and so do not compute a true complex power — compare the
   native-_Complex branch that follows.  Looks like an upstream bug in this
   branch; TODO confirm.  The loop structure is partially elided. */
295 static _Fcomplex cpow_ui(complex x, integer n) {
296 complex pow={1.0,0.0}; unsigned long int u;
298 if(n < 0) n = -n, x.r = 1/x.r, x.i=1/x.i;
300 if(u & 01) pow.r *= x.r, pow.i *= x.i;
301 if(u >>= 1) x.r *= x.r, x.i *= x.i;
305 _Fcomplex p={pow.r, pow.i};
/* cpow_ui (C99 branch): complex float ** integer using native _Complex
   arithmetic, so 1/x and the elided squaring loop are true complex
   operations.  NOTE(review): loop body and return lost in extraction. */
309 static _Complex float cpow_ui(_Complex float x, integer n) {
310 _Complex float pow=1.0; unsigned long int u;
312 if(n < 0) n = -n, x = 1/x;
/* zpow_ui (MSVC _Dcomplex branch): complex double ** integer.
   NOTE(review): same concern as cpow_ui above — the componentwise
   reciprocal and products on _Val[0]/_Val[1] drop the complex cross terms;
   TODO confirm against upstream.  Loop structure partially elided. */
323 static _Dcomplex zpow_ui(_Dcomplex x, integer n) {
324 _Dcomplex pow={1.0,0.0}; unsigned long int u;
326 if(n < 0) n = -n, x._Val[0] = 1/x._Val[0], x._Val[1] =1/x._Val[1];
328 if(u & 01) pow._Val[0] *= x._Val[0], pow._Val[1] *= x._Val[1];
329 if(u >>= 1) x._Val[0] *= x._Val[0], x._Val[1] *= x._Val[1];
333 _Dcomplex p = {pow._Val[0], pow._Val[1]};
/* zpow_ui (C99 branch): complex double ** integer with native _Complex
   arithmetic.  NOTE(review): loop body and return lost in extraction. */
337 static _Complex double zpow_ui(_Complex double x, integer n) {
338 _Complex double pow=1.0; unsigned long int u;
340 if(n < 0) n = -n, x = 1/x;
/* pow_ii: integer ** integer (Fortran I**J semantics).
   Special cases: n == 0 or x == 1 gives 1; for other bases with a
   non-positive exponent the result is 0, except x == 0 where the
   deliberate 1/x raises a division error (0 ** negative is undefined
   in Fortran).  NOTE(review): the general binary-exponentiation loop
   was lost in extraction; only the special-case handling survives. */
350 static integer pow_ii(integer x, integer n) {
351 integer pow; unsigned long int u;
353 if (n == 0 || x == 1) pow = 1;
354 else if (x != -1) pow = x == 0 ? 1/x : 0;
357 if ((n > 0) || !(n == 0 || x == 1 || x != -1)) {
/* dmaxloc_: 1-based index of the first maximum element of w[s..e]
   (Fortran MAXLOC semantics; strict > keeps the earliest maximum).
   n is unused in the visible fragment.  NOTE(review): the opening brace
   and return statement were lost in extraction. */
367 static integer dmaxloc_(double *w, integer s, integer e, integer *n)
369 double m; integer i, mi;
370 for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
371 if (w[i-1]>m) mi=i ,m=w[i-1];
/* smaxloc_: single-precision twin of dmaxloc_ — 1-based index of the first
   maximum of w[s..e].  NOTE(review): opening brace and return were lost in
   extraction. */
374 static integer smaxloc_(float *w, integer s, integer e, integer *n)
376 float m; integer i, mi;
377 for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
378 if (w[i-1]>m) mi=i ,m=w[i-1];
/* cdotc_: conjugated single-precision complex dot product,
   z = sum over i of conj(x(i)) * y(i), n elements, strides incx/incy,
   with a fast path for unit strides.  NOTE(review): extraction dropped the
   else-branch header, the final store into *z, and the #ifdef separating
   the MSVC and C99 variants, so two declarations of zdotc appear below. */
381 static inline void cdotc_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
382 integer n = *n_, incx = *incx_, incy = *incy_, i;
384 _Fcomplex zdotc = {0.0, 0.0};
385 if (incx == 1 && incy == 1) {
386 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
/* NOTE(review): the componentwise products below drop the cross terms of
   complex multiplication, so this MSVC branch does not compute a true
   conjugated dot product — compare the C99 branch; TODO confirm upstream. */
387 zdotc._Val[0] += conjf(Cf(&x[i]))._Val[0] * Cf(&y[i])._Val[0];
388 zdotc._Val[1] += conjf(Cf(&x[i]))._Val[1] * Cf(&y[i])._Val[1];
391 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
392 zdotc._Val[0] += conjf(Cf(&x[i*incx]))._Val[0] * Cf(&y[i*incy])._Val[0];
393 zdotc._Val[1] += conjf(Cf(&x[i*incx]))._Val[1] * Cf(&y[i*incy])._Val[1];
/* C99 branch: native complex accumulation (correct complex products). */
399 _Complex float zdotc = 0.0;
400 if (incx == 1 && incy == 1) {
401 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
402 zdotc += conjf(Cf(&x[i])) * Cf(&y[i]);
405 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
406 zdotc += conjf(Cf(&x[i*incx])) * Cf(&y[i*incy]);
/* zdotc_: conjugated double-precision complex dot product,
   z = sum over i of conj(x(i)) * y(i); structure mirrors cdotc_ above.
   NOTE(review): same extraction losses (else header, store into *z, #ifdef)
   and the same componentwise-product concern in the MSVC branch. */
412 static inline void zdotc_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
413 integer n = *n_, incx = *incx_, incy = *incy_, i;
415 _Dcomplex zdotc = {0.0, 0.0};
416 if (incx == 1 && incy == 1) {
417 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
418 zdotc._Val[0] += conj(Cd(&x[i]))._Val[0] * Cd(&y[i])._Val[0];
419 zdotc._Val[1] += conj(Cd(&x[i]))._Val[1] * Cd(&y[i])._Val[1];
422 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
423 zdotc._Val[0] += conj(Cd(&x[i*incx]))._Val[0] * Cd(&y[i*incy])._Val[0];
424 zdotc._Val[1] += conj(Cd(&x[i*incx]))._Val[1] * Cd(&y[i*incy])._Val[1];
/* C99 branch: native complex accumulation. */
430 _Complex double zdotc = 0.0;
431 if (incx == 1 && incy == 1) {
432 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
433 zdotc += conj(Cd(&x[i])) * Cd(&y[i]);
436 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
437 zdotc += conj(Cd(&x[i*incx])) * Cd(&y[i*incy]);
/* cdotu_: unconjugated single-precision complex dot product,
   z = sum over i of x(i) * y(i).  The trailing comments mentioning
   dconjg are inherited from the cdotc_ template; no conjugation is applied
   here.  NOTE(review): same extraction losses as cdotc_, and the same
   componentwise-product concern in the MSVC branch. */
443 static inline void cdotu_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
444 integer n = *n_, incx = *incx_, incy = *incy_, i;
446 _Fcomplex zdotc = {0.0, 0.0};
447 if (incx == 1 && incy == 1) {
448 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
449 zdotc._Val[0] += Cf(&x[i])._Val[0] * Cf(&y[i])._Val[0];
450 zdotc._Val[1] += Cf(&x[i])._Val[1] * Cf(&y[i])._Val[1];
453 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
454 zdotc._Val[0] += Cf(&x[i*incx])._Val[0] * Cf(&y[i*incy])._Val[0];
455 zdotc._Val[1] += Cf(&x[i*incx])._Val[1] * Cf(&y[i*incy])._Val[1];
/* C99 branch: native complex accumulation. */
461 _Complex float zdotc = 0.0;
462 if (incx == 1 && incy == 1) {
463 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
464 zdotc += Cf(&x[i]) * Cf(&y[i]);
467 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
468 zdotc += Cf(&x[i*incx]) * Cf(&y[i*incy]);
/* zdotu_: unconjugated double-precision complex dot product,
   z = sum over i of x(i) * y(i); double-precision twin of cdotu_.
   NOTE(review): same extraction losses and MSVC-branch componentwise
   arithmetic concern as the other dot-product helpers above. */
474 static inline void zdotu_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
475 integer n = *n_, incx = *incx_, incy = *incy_, i;
477 _Dcomplex zdotc = {0.0, 0.0};
478 if (incx == 1 && incy == 1) {
479 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
480 zdotc._Val[0] += Cd(&x[i])._Val[0] * Cd(&y[i])._Val[0];
481 zdotc._Val[1] += Cd(&x[i])._Val[1] * Cd(&y[i])._Val[1];
484 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
485 zdotc._Val[0] += Cd(&x[i*incx])._Val[0] * Cd(&y[i*incy])._Val[0];
486 zdotc._Val[1] += Cd(&x[i*incx])._Val[1] * Cd(&y[i*incy])._Val[1];
/* C99 branch: native complex accumulation. */
492 _Complex double zdotc = 0.0;
493 if (incx == 1 && incy == 1) {
494 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
495 zdotc += Cd(&x[i]) * Cd(&y[i]);
498 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
499 zdotc += Cd(&x[i*incx]) * Cd(&y[i*incy]);
505 /* -- translated by f2c (version 20000121).
506 You must link the resulting object file with the libraries:
507 -lf2c -lm (in that order)
513 /* Table of constant values */
/* Complex one and zero, passed by reference to the BLAS calls below
   (f2c passes all arguments by pointer). */
515 static doublecomplex c_b1 = {1.,0.};
516 static doublecomplex c_b2 = {0.,0.};
518 /* > \brief \b ZSYTRI_3X */
520 /* =========== DOCUMENTATION =========== */
522 /* Online html documentation available at */
523 /* http://www.netlib.org/lapack/explore-html/ */
526 /* > Download ZSYTRI_3X + dependencies */
527 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/zsytri_
530 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/zsytri_
533 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/zsytri_
541 /* SUBROUTINE ZSYTRI_3X( UPLO, N, A, LDA, E, IPIV, WORK, NB, INFO ) */
544 /* INTEGER INFO, LDA, N, NB */
545 /* INTEGER IPIV( * ) */
546 /* COMPLEX*16 A( LDA, * ), E( * ), WORK( N+NB+1, * ) */
549 /* > \par Purpose: */
553 /* > ZSYTRI_3X computes the inverse of a complex symmetric indefinite */
554 /* > matrix A using the factorization computed by ZSYTRF_RK or ZSYTRF_BK: */
556 /* > A = P*U*D*(U**T)*(P**T) or A = P*L*D*(L**T)*(P**T), */
558 /* > where U (or L) is unit upper (or lower) triangular matrix, */
559 /* > U**T (or L**T) is the transpose of U (or L), P is a permutation */
560 /* > matrix, P**T is the transpose of P, and D is symmetric and block */
561 /* > diagonal with 1-by-1 and 2-by-2 diagonal blocks. */
563 /* > This is the blocked version of the algorithm, calling Level 3 BLAS. */
569 /* > \param[in] UPLO */
571 /* > UPLO is CHARACTER*1 */
572 /* > Specifies whether the details of the factorization are */
573 /* > stored as an upper or lower triangular matrix. */
574 /* > = 'U': Upper triangle of A is stored; */
575 /* > = 'L': Lower triangle of A is stored. */
581 /* > The order of the matrix A. N >= 0. */
584 /* > \param[in,out] A */
586 /* > A is COMPLEX*16 array, dimension (LDA,N) */
587 /* > On entry, diagonal of the block diagonal matrix D and */
588 /* > factors U or L as computed by ZSYTRF_RK and ZSYTRF_BK: */
589 /* > a) ONLY diagonal elements of the symmetric block diagonal */
590 /* > matrix D on the diagonal of A, i.e. D(k,k) = A(k,k); */
591 /* > (superdiagonal (or subdiagonal) elements of D */
592 /* > should be provided on entry in array E), and */
593 /* > b) If UPLO = 'U': factor U in the superdiagonal part of A. */
594 /* > If UPLO = 'L': factor L in the subdiagonal part of A. */
596 /* > On exit, if INFO = 0, the symmetric inverse of the original */
598 /* > If UPLO = 'U': the upper triangular part of the inverse */
599 /* > is formed and the part of A below the diagonal is not */
601 /* > If UPLO = 'L': the lower triangular part of the inverse */
602 /* > is formed and the part of A above the diagonal is not */
606 /* > \param[in] LDA */
608 /* > LDA is INTEGER */
609 /* > The leading dimension of the array A. LDA >= f2cmax(1,N). */
614 /* > E is COMPLEX*16 array, dimension (N) */
615 /* > On entry, contains the superdiagonal (or subdiagonal) */
616 /* > elements of the symmetric block diagonal matrix D */
617 /* > with 1-by-1 or 2-by-2 diagonal blocks, where */
618 /* > If UPLO = 'U': E(i) = D(i-1,i), i=2:N, E(1) not referenced; */
619 /* > If UPLO = 'L': E(i) = D(i+1,i), i=1:N-1, E(N) not referenced. */
621 /* > NOTE: For 1-by-1 diagonal block D(k), where */
622 /* > 1 <= k <= N, the element E(k) is not referenced in both */
623 /* > UPLO = 'U' or UPLO = 'L' cases. */
626 /* > \param[in] IPIV */
628 /* > IPIV is INTEGER array, dimension (N) */
629 /* > Details of the interchanges and the block structure of D */
630 /* > as determined by ZSYTRF_RK or ZSYTRF_BK. */
633 /* > \param[out] WORK */
635 /* > WORK is COMPLEX*16 array, dimension (N+NB+1,NB+3). */
638 /* > \param[in] NB */
640 /* > NB is INTEGER */
644 /* > \param[out] INFO */
646 /* > INFO is INTEGER */
647 /* > = 0: successful exit */
648 /* > < 0: if INFO = -i, the i-th argument had an illegal value */
649 /* > > 0: if INFO = i, D(i,i) = 0; the matrix is singular and its */
650 /* > inverse could not be computed. */
656 /* > \author Univ. of Tennessee */
657 /* > \author Univ. of California Berkeley */
658 /* > \author Univ. of Colorado Denver */
659 /* > \author NAG Ltd. */
661 /* > \date June 2017 */
663 /* > \ingroup complex16SYcomputational */
665 /* > \par Contributors: */
666 /* ================== */
669 /* > June 2017, Igor Kozachenko, */
670 /* > Computer Science Division, */
671 /* > University of California, Berkeley */
675 /* ===================================================================== */
676 /* Subroutine */ int zsytri_3x_(char *uplo, integer *n, doublecomplex *a,
677 integer *lda, doublecomplex *e, integer *ipiv, doublecomplex *work,
678 integer *nb, integer *info)
680 /* System generated locals */
681 integer a_dim1, a_offset, work_dim1, work_offset, i__1, i__2, i__3, i__4,
683 doublecomplex z__1, z__2, z__3;
685 /* Local variables */
687 doublecomplex akkp1, d__;
689 extern /* Subroutine */ int zsyswapr_(char *, integer *, doublecomplex *,
690 integer *, integer *, integer *);
692 extern logical lsame_(char *, char *);
693 extern /* Subroutine */ int zgemm_(char *, char *, integer *, integer *,
694 integer *, doublecomplex *, doublecomplex *, integer *,
695 doublecomplex *, integer *, doublecomplex *, doublecomplex *,
698 extern /* Subroutine */ int ztrmm_(char *, char *, char *, char *,
699 integer *, integer *, doublecomplex *, doublecomplex *, integer *,
700 doublecomplex *, integer *);
701 doublecomplex ak, u01_i_j__;
703 doublecomplex u11_i_j__;
705 extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen);
707 extern /* Subroutine */ int ztrtri_(char *, char *, integer *,
708 doublecomplex *, integer *, integer *);
710 doublecomplex akp1, u01_ip1_j__, u11_ip1_j__;
713 /* -- LAPACK computational routine (version 3.7.1) -- */
714 /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
715 /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
719 /* ===================================================================== */
722 /* Test the input parameters. */
724 /* Parameter adjustments */
726 a_offset = 1 + a_dim1 * 1;
730 work_dim1 = *n + *nb + 1;
731 work_offset = 1 + work_dim1 * 1;
736 upper = lsame_(uplo, "U");
737 if (! upper && ! lsame_(uplo, "L")) {
741 } else if (*lda < f2cmax(1,*n)) {
745 /* Quick return if possible */
749 xerbla_("ZSYTRI_3X", &i__1, (ftnlen)9);
756 /* Workspace stores the non-diagonal elements of D */
759 for (k = 1; k <= i__1; ++k) {
760 i__2 = k + work_dim1;
762 work[i__2].r = e[i__3].r, work[i__2].i = e[i__3].i;
765 /* Check that the diagonal matrix D is nonsingular. */
769 /* Upper triangular storage: examine D from bottom to top */
771 for (*info = *n; *info >= 1; --(*info)) {
772 i__1 = *info + *info * a_dim1;
773 if (ipiv[*info] > 0 && (a[i__1].r == 0. && a[i__1].i == 0.)) {
779 /* Lower triangular storage: examine D from top to bottom. */
782 for (*info = 1; *info <= i__1; ++(*info)) {
783 i__2 = *info + *info * a_dim1;
784 if (ipiv[*info] > 0 && (a[i__2].r == 0. && a[i__2].i == 0.)) {
792 /* Splitting Workspace */
793 /* U01 is a block ( N, NB+1 ) */
794 /* The first element of U01 is in WORK( 1, 1 ) */
795 /* U11 is a block ( NB+1, NB+1 ) */
796 /* The first element of U11 is in WORK( N+1, 1 ) */
800 /* INVD is a block ( N, 2 ) */
801 /* The first element of INVD is in WORK( 1, INVD ) */
808 /* invA = P * inv(U**T) * inv(D) * inv(U) * P**T. */
810 ztrtri_(uplo, "U", n, &a[a_offset], lda, info);
812 /* inv(D) and inv(D) * inv(U) */
817 /* 1 x 1 diagonal NNB */
818 i__1 = k + invd * work_dim1;
819 z_div(&z__1, &c_b1, &a[k + k * a_dim1]);
820 work[i__1].r = z__1.r, work[i__1].i = z__1.i;
821 i__1 = k + (invd + 1) * work_dim1;
822 work[i__1].r = 0., work[i__1].i = 0.;
824 /* 2 x 2 diagonal NNB */
825 i__1 = k + 1 + work_dim1;
826 t.r = work[i__1].r, t.i = work[i__1].i;
827 z_div(&z__1, &a[k + k * a_dim1], &t);
828 ak.r = z__1.r, ak.i = z__1.i;
829 z_div(&z__1, &a[k + 1 + (k + 1) * a_dim1], &t);
830 akp1.r = z__1.r, akp1.i = z__1.i;
831 z_div(&z__1, &work[k + 1 + work_dim1], &t);
832 akkp1.r = z__1.r, akkp1.i = z__1.i;
833 z__3.r = ak.r * akp1.r - ak.i * akp1.i, z__3.i = ak.r *
834 akp1.i + ak.i * akp1.r;
835 z__2.r = z__3.r - 1., z__2.i = z__3.i + 0.;
836 z__1.r = t.r * z__2.r - t.i * z__2.i, z__1.i = t.r * z__2.i +
838 d__.r = z__1.r, d__.i = z__1.i;
839 i__1 = k + invd * work_dim1;
840 z_div(&z__1, &akp1, &d__);
841 work[i__1].r = z__1.r, work[i__1].i = z__1.i;
842 i__1 = k + 1 + (invd + 1) * work_dim1;
843 z_div(&z__1, &ak, &d__);
844 work[i__1].r = z__1.r, work[i__1].i = z__1.i;
845 i__1 = k + (invd + 1) * work_dim1;
846 z__2.r = -akkp1.r, z__2.i = -akkp1.i;
847 z_div(&z__1, &z__2, &d__);
848 work[i__1].r = z__1.r, work[i__1].i = z__1.i;
849 i__1 = k + 1 + invd * work_dim1;
850 i__2 = k + (invd + 1) * work_dim1;
851 work[i__1].r = work[i__2].r, work[i__1].i = work[i__2].i;
857 /* inv(U**T) = (inv(U))**T */
859 /* inv(U**T) * inv(D) * inv(U) */
868 /* count negative elements, */
870 for (i__ = cut + 1 - nnb; i__ <= i__1; ++i__) {
875 /* need an even number for a clear cut */
876 if (icount % 2 == 1) {
885 for (i__ = 1; i__ <= i__1; ++i__) {
887 for (j = 1; j <= i__2; ++j) {
888 i__3 = i__ + j * work_dim1;
889 i__4 = i__ + (cut + j) * a_dim1;
890 work[i__3].r = a[i__4].r, work[i__3].i = a[i__4].i;
897 for (i__ = 1; i__ <= i__1; ++i__) {
898 i__2 = u11 + i__ + i__ * work_dim1;
899 work[i__2].r = 1., work[i__2].i = 0.;
901 for (j = 1; j <= i__2; ++j) {
902 i__3 = u11 + i__ + j * work_dim1;
903 work[i__3].r = 0., work[i__3].i = 0.;
906 for (j = i__ + 1; j <= i__2; ++j) {
907 i__3 = u11 + i__ + j * work_dim1;
908 i__4 = cut + i__ + (cut + j) * a_dim1;
909 work[i__3].r = a[i__4].r, work[i__3].i = a[i__4].i;
919 for (j = 1; j <= i__1; ++j) {
920 i__2 = i__ + j * work_dim1;
921 i__3 = i__ + invd * work_dim1;
922 i__4 = i__ + j * work_dim1;
923 z__1.r = work[i__3].r * work[i__4].r - work[i__3].i *
924 work[i__4].i, z__1.i = work[i__3].r * work[
925 i__4].i + work[i__3].i * work[i__4].r;
926 work[i__2].r = z__1.r, work[i__2].i = z__1.i;
930 for (j = 1; j <= i__1; ++j) {
931 i__2 = i__ + j * work_dim1;
932 u01_i_j__.r = work[i__2].r, u01_i_j__.i = work[i__2]
934 i__2 = i__ + 1 + j * work_dim1;
935 u01_ip1_j__.r = work[i__2].r, u01_ip1_j__.i = work[
937 i__2 = i__ + j * work_dim1;
938 i__3 = i__ + invd * work_dim1;
939 z__2.r = work[i__3].r * u01_i_j__.r - work[i__3].i *
940 u01_i_j__.i, z__2.i = work[i__3].r *
941 u01_i_j__.i + work[i__3].i * u01_i_j__.r;
942 i__4 = i__ + (invd + 1) * work_dim1;
943 z__3.r = work[i__4].r * u01_ip1_j__.r - work[i__4].i *
944 u01_ip1_j__.i, z__3.i = work[i__4].r *
945 u01_ip1_j__.i + work[i__4].i * u01_ip1_j__.r;
946 z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
947 work[i__2].r = z__1.r, work[i__2].i = z__1.i;
948 i__2 = i__ + 1 + j * work_dim1;
949 i__3 = i__ + 1 + invd * work_dim1;
950 z__2.r = work[i__3].r * u01_i_j__.r - work[i__3].i *
951 u01_i_j__.i, z__2.i = work[i__3].r *
952 u01_i_j__.i + work[i__3].i * u01_i_j__.r;
953 i__4 = i__ + 1 + (invd + 1) * work_dim1;
954 z__3.r = work[i__4].r * u01_ip1_j__.r - work[i__4].i *
955 u01_ip1_j__.i, z__3.i = work[i__4].r *
956 u01_ip1_j__.i + work[i__4].i * u01_ip1_j__.r;
957 z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
958 work[i__2].r = z__1.r, work[i__2].i = z__1.i;
969 if (ipiv[cut + i__] > 0) {
971 for (j = i__; j <= i__1; ++j) {
972 i__2 = u11 + i__ + j * work_dim1;
973 i__3 = cut + i__ + invd * work_dim1;
974 i__4 = u11 + i__ + j * work_dim1;
975 z__1.r = work[i__3].r * work[i__4].r - work[i__3].i *
976 work[i__4].i, z__1.i = work[i__3].r * work[
977 i__4].i + work[i__3].i * work[i__4].r;
978 work[i__2].r = z__1.r, work[i__2].i = z__1.i;
982 for (j = i__; j <= i__1; ++j) {
983 i__2 = u11 + i__ + j * work_dim1;
984 u11_i_j__.r = work[i__2].r, u11_i_j__.i = work[i__2]
986 i__2 = u11 + i__ + 1 + j * work_dim1;
987 u11_ip1_j__.r = work[i__2].r, u11_ip1_j__.i = work[
989 i__2 = u11 + i__ + j * work_dim1;
990 i__3 = cut + i__ + invd * work_dim1;
991 i__4 = u11 + i__ + j * work_dim1;
992 z__2.r = work[i__3].r * work[i__4].r - work[i__3].i *
993 work[i__4].i, z__2.i = work[i__3].r * work[
994 i__4].i + work[i__3].i * work[i__4].r;
995 i__5 = cut + i__ + (invd + 1) * work_dim1;
996 i__6 = u11 + i__ + 1 + j * work_dim1;
997 z__3.r = work[i__5].r * work[i__6].r - work[i__5].i *
998 work[i__6].i, z__3.i = work[i__5].r * work[
999 i__6].i + work[i__5].i * work[i__6].r;
1000 z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
1001 work[i__2].r = z__1.r, work[i__2].i = z__1.i;
1002 i__2 = u11 + i__ + 1 + j * work_dim1;
1003 i__3 = cut + i__ + 1 + invd * work_dim1;
1004 z__2.r = work[i__3].r * u11_i_j__.r - work[i__3].i *
1005 u11_i_j__.i, z__2.i = work[i__3].r *
1006 u11_i_j__.i + work[i__3].i * u11_i_j__.r;
1007 i__4 = cut + i__ + 1 + (invd + 1) * work_dim1;
1008 z__3.r = work[i__4].r * u11_ip1_j__.r - work[i__4].i *
1009 u11_ip1_j__.i, z__3.i = work[i__4].r *
1010 u11_ip1_j__.i + work[i__4].i * u11_ip1_j__.r;
1011 z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
1012 work[i__2].r = z__1.r, work[i__2].i = z__1.i;
1019 /* U11**T * invD1 * U11 -> U11 */
1021 i__1 = *n + *nb + 1;
1022 ztrmm_("L", "U", "T", "U", &nnb, &nnb, &c_b1, &a[cut + 1 + (cut +
1023 1) * a_dim1], lda, &work[u11 + 1 + work_dim1], &i__1);
1026 for (i__ = 1; i__ <= i__1; ++i__) {
1028 for (j = i__; j <= i__2; ++j) {
1029 i__3 = cut + i__ + (cut + j) * a_dim1;
1030 i__4 = u11 + i__ + j * work_dim1;
1031 a[i__3].r = work[i__4].r, a[i__3].i = work[i__4].i;
1035 /* U01**T * invD * U01 -> A( CUT+I, CUT+J ) */
1037 i__1 = *n + *nb + 1;
1038 i__2 = *n + *nb + 1;
1039 zgemm_("T", "N", &nnb, &nnb, &cut, &c_b1, &a[(cut + 1) * a_dim1 +
1040 1], lda, &work[work_offset], &i__1, &c_b2, &work[u11 + 1
1041 + work_dim1], &i__2);
1043 /* U11 = U11**T * invD1 * U11 + U01**T * invD * U01 */
1046 for (i__ = 1; i__ <= i__1; ++i__) {
1048 for (j = i__; j <= i__2; ++j) {
1049 i__3 = cut + i__ + (cut + j) * a_dim1;
1050 i__4 = cut + i__ + (cut + j) * a_dim1;
1051 i__5 = u11 + i__ + j * work_dim1;
1052 z__1.r = a[i__4].r + work[i__5].r, z__1.i = a[i__4].i +
1054 a[i__3].r = z__1.r, a[i__3].i = z__1.i;
1058 /* U01 = U00**T * invD0 * U01 */
1060 i__1 = *n + *nb + 1;
1061 ztrmm_("L", uplo, "T", "U", &cut, &nnb, &c_b1, &a[a_offset], lda,
1062 &work[work_offset], &i__1);
1067 for (i__ = 1; i__ <= i__1; ++i__) {
1069 for (j = 1; j <= i__2; ++j) {
1070 i__3 = i__ + (cut + j) * a_dim1;
1071 i__4 = i__ + j * work_dim1;
1072 a[i__3].r = work[i__4].r, a[i__3].i = work[i__4].i;
1080 /* Apply PERMUTATIONS P and P**T: */
1081 /* P * inv(U**T) * inv(D) * inv(U) * P**T. */
1082 /* Interchange rows and columns I and IPIV(I) in reverse order */
1083 /* from the formation order of IPIV vector for Upper case. */
1085 /* ( We can use a loop over IPIV with increment 1, */
1086 /* since the ABS value of IPIV(I) represents the row (column) */
1087 /* index of the interchange with row (column) i in both 1x1 */
1088 /* and 2x2 pivot cases, i.e. we don't need separate code branches */
1089 /* for 1x1 and 2x2 pivot cases ) */
1092 for (i__ = 1; i__ <= i__1; ++i__) {
1093 ip = (i__2 = ipiv[i__], abs(i__2));
1096 zsyswapr_(uplo, n, &a[a_offset], lda, &i__, &ip);
1099 zsyswapr_(uplo, n, &a[a_offset], lda, &ip, &i__);
1108 /* inv A = P * inv(L**T) * inv(D) * inv(L) * P**T. */
1110 ztrtri_(uplo, "U", n, &a[a_offset], lda, info);
1112 /* inv(D) and inv(D) * inv(L) */
1117 /* 1 x 1 diagonal NNB */
1118 i__1 = k + invd * work_dim1;
1119 z_div(&z__1, &c_b1, &a[k + k * a_dim1]);
1120 work[i__1].r = z__1.r, work[i__1].i = z__1.i;
1121 i__1 = k + (invd + 1) * work_dim1;
1122 work[i__1].r = 0., work[i__1].i = 0.;
1124 /* 2 x 2 diagonal NNB */
1125 i__1 = k - 1 + work_dim1;
1126 t.r = work[i__1].r, t.i = work[i__1].i;
1127 z_div(&z__1, &a[k - 1 + (k - 1) * a_dim1], &t);
1128 ak.r = z__1.r, ak.i = z__1.i;
1129 z_div(&z__1, &a[k + k * a_dim1], &t);
1130 akp1.r = z__1.r, akp1.i = z__1.i;
1131 z_div(&z__1, &work[k - 1 + work_dim1], &t);
1132 akkp1.r = z__1.r, akkp1.i = z__1.i;
1133 z__3.r = ak.r * akp1.r - ak.i * akp1.i, z__3.i = ak.r *
1134 akp1.i + ak.i * akp1.r;
1135 z__2.r = z__3.r - 1., z__2.i = z__3.i + 0.;
1136 z__1.r = t.r * z__2.r - t.i * z__2.i, z__1.i = t.r * z__2.i +
1138 d__.r = z__1.r, d__.i = z__1.i;
1139 i__1 = k - 1 + invd * work_dim1;
1140 z_div(&z__1, &akp1, &d__);
1141 work[i__1].r = z__1.r, work[i__1].i = z__1.i;
1142 i__1 = k + invd * work_dim1;
1143 z_div(&z__1, &ak, &d__);
1144 work[i__1].r = z__1.r, work[i__1].i = z__1.i;
1145 i__1 = k + (invd + 1) * work_dim1;
1146 z__2.r = -akkp1.r, z__2.i = -akkp1.i;
1147 z_div(&z__1, &z__2, &d__);
1148 work[i__1].r = z__1.r, work[i__1].i = z__1.i;
1149 i__1 = k - 1 + (invd + 1) * work_dim1;
1150 i__2 = k + (invd + 1) * work_dim1;
1151 work[i__1].r = work[i__2].r, work[i__1].i = work[i__2].i;
1157 /* inv(L**T) = (inv(L))**T */
1159 /* inv(L**T) * inv(D) * inv(L) */
1164 if (cut + nnb > *n) {
1168 /* count negative elements, */
1170 for (i__ = cut + 1; i__ <= i__1; ++i__) {
1171 if (ipiv[i__] < 0) {
1175 /* need an even number for a clear cut */
1176 if (icount % 2 == 1) {
1183 i__1 = *n - cut - nnb;
1184 for (i__ = 1; i__ <= i__1; ++i__) {
1186 for (j = 1; j <= i__2; ++j) {
1187 i__3 = i__ + j * work_dim1;
1188 i__4 = cut + nnb + i__ + (cut + j) * a_dim1;
1189 work[i__3].r = a[i__4].r, work[i__3].i = a[i__4].i;
1196 for (i__ = 1; i__ <= i__1; ++i__) {
1197 i__2 = u11 + i__ + i__ * work_dim1;
1198 work[i__2].r = 1., work[i__2].i = 0.;
1200 for (j = i__ + 1; j <= i__2; ++j) {
1201 i__3 = u11 + i__ + j * work_dim1;
1202 work[i__3].r = 0., work[i__3].i = 0.;
1205 for (j = 1; j <= i__2; ++j) {
1206 i__3 = u11 + i__ + j * work_dim1;
1207 i__4 = cut + i__ + (cut + j) * a_dim1;
1208 work[i__3].r = a[i__4].r, work[i__3].i = a[i__4].i;
1214 i__ = *n - cut - nnb;
1216 if (ipiv[cut + nnb + i__] > 0) {
1218 for (j = 1; j <= i__1; ++j) {
1219 i__2 = i__ + j * work_dim1;
1220 i__3 = cut + nnb + i__ + invd * work_dim1;
1221 i__4 = i__ + j * work_dim1;
1222 z__1.r = work[i__3].r * work[i__4].r - work[i__3].i *
1223 work[i__4].i, z__1.i = work[i__3].r * work[
1224 i__4].i + work[i__3].i * work[i__4].r;
1225 work[i__2].r = z__1.r, work[i__2].i = z__1.i;
1229 for (j = 1; j <= i__1; ++j) {
1230 i__2 = i__ + j * work_dim1;
1231 u01_i_j__.r = work[i__2].r, u01_i_j__.i = work[i__2]
1233 i__2 = i__ - 1 + j * work_dim1;
1234 u01_ip1_j__.r = work[i__2].r, u01_ip1_j__.i = work[
1236 i__2 = i__ + j * work_dim1;
1237 i__3 = cut + nnb + i__ + invd * work_dim1;
1238 z__2.r = work[i__3].r * u01_i_j__.r - work[i__3].i *
1239 u01_i_j__.i, z__2.i = work[i__3].r *
1240 u01_i_j__.i + work[i__3].i * u01_i_j__.r;
1241 i__4 = cut + nnb + i__ + (invd + 1) * work_dim1;
1242 z__3.r = work[i__4].r * u01_ip1_j__.r - work[i__4].i *
1243 u01_ip1_j__.i, z__3.i = work[i__4].r *
1244 u01_ip1_j__.i + work[i__4].i * u01_ip1_j__.r;
1245 z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
1246 work[i__2].r = z__1.r, work[i__2].i = z__1.i;
1247 i__2 = i__ - 1 + j * work_dim1;
1248 i__3 = cut + nnb + i__ - 1 + (invd + 1) * work_dim1;
1249 z__2.r = work[i__3].r * u01_i_j__.r - work[i__3].i *
1250 u01_i_j__.i, z__2.i = work[i__3].r *
1251 u01_i_j__.i + work[i__3].i * u01_i_j__.r;
1252 i__4 = cut + nnb + i__ - 1 + invd * work_dim1;
1253 z__3.r = work[i__4].r * u01_ip1_j__.r - work[i__4].i *
1254 u01_ip1_j__.i, z__3.i = work[i__4].r *
1255 u01_ip1_j__.i + work[i__4].i * u01_ip1_j__.r;
1256 z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
1257 work[i__2].r = z__1.r, work[i__2].i = z__1.i;
1268 if (ipiv[cut + i__] > 0) {
1270 for (j = 1; j <= i__1; ++j) {
1271 i__2 = u11 + i__ + j * work_dim1;
1272 i__3 = cut + i__ + invd * work_dim1;
1273 i__4 = u11 + i__ + j * work_dim1;
1274 z__1.r = work[i__3].r * work[i__4].r - work[i__3].i *
1275 work[i__4].i, z__1.i = work[i__3].r * work[
1276 i__4].i + work[i__3].i * work[i__4].r;
1277 work[i__2].r = z__1.r, work[i__2].i = z__1.i;
1281 for (j = 1; j <= i__1; ++j) {
1282 i__2 = u11 + i__ + j * work_dim1;
1283 u11_i_j__.r = work[i__2].r, u11_i_j__.i = work[i__2]
1285 i__2 = u11 + i__ - 1 + j * work_dim1;
1286 u11_ip1_j__.r = work[i__2].r, u11_ip1_j__.i = work[
1288 i__2 = u11 + i__ + j * work_dim1;
1289 i__3 = cut + i__ + invd * work_dim1;
1290 i__4 = u11 + i__ + j * work_dim1;
1291 z__2.r = work[i__3].r * work[i__4].r - work[i__3].i *
1292 work[i__4].i, z__2.i = work[i__3].r * work[
1293 i__4].i + work[i__3].i * work[i__4].r;
1294 i__5 = cut + i__ + (invd + 1) * work_dim1;
1295 z__3.r = work[i__5].r * u11_ip1_j__.r - work[i__5].i *
1296 u11_ip1_j__.i, z__3.i = work[i__5].r *
1297 u11_ip1_j__.i + work[i__5].i * u11_ip1_j__.r;
1298 z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
1299 work[i__2].r = z__1.r, work[i__2].i = z__1.i;
1300 i__2 = u11 + i__ - 1 + j * work_dim1;
1301 i__3 = cut + i__ - 1 + (invd + 1) * work_dim1;
1302 z__2.r = work[i__3].r * u11_i_j__.r - work[i__3].i *
1303 u11_i_j__.i, z__2.i = work[i__3].r *
1304 u11_i_j__.i + work[i__3].i * u11_i_j__.r;
1305 i__4 = cut + i__ - 1 + invd * work_dim1;
1306 z__3.r = work[i__4].r * u11_ip1_j__.r - work[i__4].i *
1307 u11_ip1_j__.i, z__3.i = work[i__4].r *
1308 u11_ip1_j__.i + work[i__4].i * u11_ip1_j__.r;
1309 z__1.r = z__2.r + z__3.r, z__1.i = z__2.i + z__3.i;
1310 work[i__2].r = z__1.r, work[i__2].i = z__1.i;
1317 /* L11**T * invD1 * L11 -> L11 */
1319 i__1 = *n + *nb + 1;
1320 ztrmm_("L", uplo, "T", "U", &nnb, &nnb, &c_b1, &a[cut + 1 + (cut
1321 + 1) * a_dim1], lda, &work[u11 + 1 + work_dim1], &i__1);
1324 for (i__ = 1; i__ <= i__1; ++i__) {
1326 for (j = 1; j <= i__2; ++j) {
1327 i__3 = cut + i__ + (cut + j) * a_dim1;
1328 i__4 = u11 + i__ + j * work_dim1;
1329 a[i__3].r = work[i__4].r, a[i__3].i = work[i__4].i;
1333 if (cut + nnb < *n) {
1335 /* L21**T * invD2*L21 -> A( CUT+I, CUT+J ) */
1337 i__1 = *n - nnb - cut;
1338 i__2 = *n + *nb + 1;
1339 i__3 = *n + *nb + 1;
1340 zgemm_("T", "N", &nnb, &nnb, &i__1, &c_b1, &a[cut + nnb + 1 +
1341 (cut + 1) * a_dim1], lda, &work[work_offset], &i__2, &
1342 c_b2, &work[u11 + 1 + work_dim1], &i__3);
1344 /* L11 = L11**T * invD1 * L11 + U01**T * invD * U01 */
1347 for (i__ = 1; i__ <= i__1; ++i__) {
1349 for (j = 1; j <= i__2; ++j) {
1350 i__3 = cut + i__ + (cut + j) * a_dim1;
1351 i__4 = cut + i__ + (cut + j) * a_dim1;
1352 i__5 = u11 + i__ + j * work_dim1;
1353 z__1.r = a[i__4].r + work[i__5].r, z__1.i = a[i__4].i
1355 a[i__3].r = z__1.r, a[i__3].i = z__1.i;
1359 /* L01 = L22**T * invD2 * L21 */
1361 i__1 = *n - nnb - cut;
1362 i__2 = *n + *nb + 1;
1363 ztrmm_("L", uplo, "T", "U", &i__1, &nnb, &c_b1, &a[cut + nnb
1364 + 1 + (cut + nnb + 1) * a_dim1], lda, &work[
1365 work_offset], &i__2);
1369 i__1 = *n - cut - nnb;
1370 for (i__ = 1; i__ <= i__1; ++i__) {
1372 for (j = 1; j <= i__2; ++j) {
1373 i__3 = cut + nnb + i__ + (cut + j) * a_dim1;
1374 i__4 = i__ + j * work_dim1;
1375 a[i__3].r = work[i__4].r, a[i__3].i = work[i__4].i;
1381 /* L11 = L11**T * invD1 * L11 */
1384 for (i__ = 1; i__ <= i__1; ++i__) {
1386 for (j = 1; j <= i__2; ++j) {
1387 i__3 = cut + i__ + (cut + j) * a_dim1;
1388 i__4 = u11 + i__ + j * work_dim1;
1389 a[i__3].r = work[i__4].r, a[i__3].i = work[i__4].i;
1400 /* Apply PERMUTATIONS P and P**T: */
1401 /* P * inv(L**T) * inv(D) * inv(L) * P**T. */
1402 /* Interchange rows and columns I and IPIV(I) in reverse order */
1403 /* from the formation order of IPIV vector for Lower case. */
1405 /* ( We can use a loop over IPIV with increment -1, */
1406 /* since the ABS value of IPIV(I) represents the row (column) */
1407 /* index of the interchange with row (column) i in both 1x1 */
1408 /* and 2x2 pivot cases, i.e. we don't need separate code branches */
1409 /* for 1x1 and 2x2 pivot cases ) */
1411 for (i__ = *n; i__ >= 1; --i__) {
1412 ip = (i__1 = ipiv[i__], abs(i__1));
1415 zsyswapr_(uplo, n, &a[a_offset], lda, &i__, &ip);
1418 zsyswapr_(uplo, n, &a[a_offset], lda, &ip, &i__);
1427 /* End of ZSYTRI_3X */