/* Integer-width typedefs for the BLAS interface.
 * NOTE(review): the duplicated BLASLONG/BLASULONG and blasabs definitions
 * below belong to elided #if/#else branches (64-bit vs 32-bit integer
 * interface) — confirm against the full header. */
14 typedef long long BLASLONG;
15 typedef unsigned long long BLASULONG;
17 typedef long BLASLONG;
18 typedef unsigned long BLASULONG;
22 typedef BLASLONG blasint;
/* blasabs selects the abs() variant matching blasint's width. */
24 #define blasabs(x) llabs(x)
26 #define blasabs(x) labs(x)
30 #define blasabs(x) abs(x)
/* f2c base scalar types: Fortran INTEGER, CHARACTER address, REAL/COMPLEX. */
33 typedef blasint integer;
35 typedef unsigned int uinteger;
36 typedef char *address;
37 typedef short int shortint;
39 typedef double doublereal;
40 typedef struct { real r, i; } complex;
41 typedef struct { doublereal r, i; } doublecomplex;
/* Cf/Cd load an f2c complex struct as a native complex value; _pCf/_pCd
 * reinterpret the struct pointer as a native complex pointer (the structs
 * and the native types share the same {re,im} layout).
 * NOTE(review): the duplicate definitions are the MSVC (_Fcomplex/_Dcomplex)
 * vs C99 (_Complex) branches of an elided #ifdef. */
43 static inline _Fcomplex Cf(complex *z) {_Fcomplex zz={z->r , z->i}; return zz;}
44 static inline _Dcomplex Cd(doublecomplex *z) {_Dcomplex zz={z->r , z->i};return zz;}
45 static inline _Fcomplex * _pCf(complex *z) {return (_Fcomplex*)z;}
46 static inline _Dcomplex * _pCd(doublecomplex *z) {return (_Dcomplex*)z;}
48 static inline _Complex float Cf(complex *z) {return z->r + z->i*_Complex_I;}
49 static inline _Complex double Cd(doublecomplex *z) {return z->r + z->i*_Complex_I;}
50 static inline _Complex float * _pCf(complex *z) {return (_Complex float*)z;}
51 static inline _Complex double * _pCd(doublecomplex *z) {return (_Complex double*)z;}
/* pCf/pCd: writable lvalue view of an f2c complex object. */
53 #define pCf(z) (*_pCf(z))
54 #define pCd(z) (*_pCd(z))
/* Fortran LOGICAL*2 / LOGICAL*1 / INTEGER*1. */
56 typedef short int shortlogical;
57 typedef char logical1;
58 typedef char integer1;
/* f2c runtime I/O declarations.
 * NOTE(review): the struct bodies (cilist/icilist/alist, Multitype, Vardesc,
 * Namelist members) are elided in this excerpt; only the tags and typedefs
 * are visible — do not rely on the field layout shown here. */
63 /* Extern is for use with -E */
74 /*external read, write*/
83 /*internal read, write*/
113 /*rewind, backspace, endfile*/
125 ftnint *inex; /*parameters in standard's order*/
151 union Multitype { /* for multiple entry points */
162 typedef union Multitype Multitype;
164 struct Vardesc { /* for Namelist */
170 typedef struct Vardesc Vardesc;
177 typedef struct Namelist Namelist;
/* Fortran intrinsic scalar helpers (ABS, MIN/MAX, DIM bit intrinsics).
 * NOTE(review): these macros evaluate their arguments more than once —
 * arguments with side effects (e.g. abs(i++)) are unsafe. */
179 #define abs(x) ((x) >= 0 ? (x) : -(x))
180 #define dabs(x) (fabs(x))
181 #define f2cmin(a,b) ((a) <= (b) ? (a) : (b))
182 #define f2cmax(a,b) ((a) >= (b) ? (a) : (b))
183 #define dmin(a,b) (f2cmin(a,b))
184 #define dmax(a,b) (f2cmax(a,b))
185 #define bit_test(a,b) ((a) >> (b) & 1)
186 #define bit_clear(a,b) ((a) & ~((uinteger)1 << (b)))
187 #define bit_set(a,b) ((a) | ((uinteger)1 << (b)))
/* Fortran ABORT: routes to sig_die (which here just exits). */
189 #define abort_() { sig_die("Fortran abort routine called", 1); }
/* Single-precision complex CABS / CCOS. NOTE(review): c_cos calls the
 * double-precision ccos on a float-complex value; presumably intentional
 * widening — confirm against the C99-branch convention (csinf/cexpf). */
190 #define c_abs(z) (cabsf(Cf(z)))
191 #define c_cos(R,Z) { pCf(R)=ccos(Cf(Z)); }
/* Complex division helpers for the MSVC (_Fcomplex/_Dcomplex) branch:
 * *(c) = *(a) / *(b), computed as a*conj(b)/|b|^2.
 * BUGFIX(review): the previous definitions
 *   - divided component-wise (re/re, im/im), which is not complex division;
 *   - assigned into the temporary struct returned by Cf()/Cd() — not an
 *     lvalue, so the result was discarded (and is not valid C);
 *   - z_div referenced the undefined name df(b) instead of Cd(b).
 * These versions store through the lvalues pCf(c)/pCd(c) and evaluate each
 * argument exactly once. */
#define c_div(c, a, b) { _Fcomplex _fa = Cf(a), _fb = Cf(b); float _fm = _fb._Val[0]*_fb._Val[0] + _fb._Val[1]*_fb._Val[1]; pCf(c)._Val[0] = (_fa._Val[0]*_fb._Val[0] + _fa._Val[1]*_fb._Val[1])/_fm; pCf(c)._Val[1] = (_fa._Val[1]*_fb._Val[0] - _fa._Val[0]*_fb._Val[1])/_fm; }
#define z_div(c, a, b) { _Dcomplex _da = Cd(a), _db = Cd(b); double _dm = _db._Val[0]*_db._Val[0] + _db._Val[1]*_db._Val[1]; pCd(c)._Val[0] = (_da._Val[0]*_db._Val[0] + _da._Val[1]*_db._Val[1])/_dm; pCd(c)._Val[1] = (_da._Val[1]*_db._Val[0] - _da._Val[0]*_db._Val[1])/_dm; }
/* C99 branch: native complex division for Fortran c_div/z_div. */
196 #define c_div(c, a, b) {pCf(c) = Cf(a)/Cf(b);}
197 #define z_div(c, a, b) {pCd(c) = Cd(a)/Cd(b);}
/* Single-precision complex elementary functions (Fortran CEXP/CLOG/CSIN/CSQRT). */
199 #define c_exp(R, Z) {pCf(R) = cexpf(Cf(Z));}
200 #define c_log(R, Z) {pCf(R) = clogf(Cf(Z));}
201 #define c_sin(R, Z) {pCf(R) = csinf(Cf(Z));}
202 //#define c_sqrt(R, Z) {*(R) = csqrtf(Cf(Z));}
203 #define c_sqrt(R, Z) {pCf(R) = csqrtf(Cf(Z));}
/* Double-precision intrinsics; arguments are POINTERS (f2c pass-by-reference). */
204 #define d_abs(x) (fabs(*(x)))
205 #define d_acos(x) (acos(*(x)))
206 #define d_asin(x) (asin(*(x)))
207 #define d_atan(x) (atan(*(x)))
208 #define d_atn2(x, y) (atan2(*(x),*(y)))
/* Complex conjugate, stored through the writable pCd/pCf view. */
209 #define d_cnjg(R, Z) { pCd(R) = conj(Cd(Z)); }
210 #define r_cnjg(R, Z) { pCf(R) = conjf(Cf(Z)); }
211 #define d_cos(x) (cos(*(x)))
212 #define d_cosh(x) (cosh(*(x)))
/* Fortran DIM: positive difference, f2cmax(a-b, 0). */
213 #define d_dim(__a, __b) ( *(__a) > *(__b) ? *(__a) - *(__b) : 0.0 )
214 #define d_exp(x) (exp(*(x)))
215 #define d_imag(z) (cimag(Cd(z)))
216 #define r_imag(z) (cimagf(Cf(z)))
/* AINT: truncate toward zero (floor of magnitude, sign preserved). */
217 #define d_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
218 #define r_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
/* LOG10 via log(x) * log10(e); the constant is 1/ln(10). */
219 #define d_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
220 #define r_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
221 #define d_log(x) (log(*(x)))
222 #define d_mod(x, y) (fmod(*(x), *(y)))
/* ANINT / NINT: round half away from zero. Multiple-evaluation hazard. */
223 #define u_nint(__x) ((__x)>=0 ? floor((__x) + .5) : -floor(.5 - (__x)))
224 #define d_nint(x) u_nint(*(x))
/* SIGN(a,b): |a| with the sign of b. Multiple-evaluation hazard. */
225 #define u_sign(__a,__b) ((__b) >= 0 ? ((__a) >= 0 ? (__a) : -(__a)) : -((__a) >= 0 ? (__a) : -(__a)))
226 #define d_sign(a,b) u_sign(*(a),*(b))
227 #define r_sign(a,b) u_sign(*(a),*(b))
228 #define d_sin(x) (sin(*(x)))
229 #define d_sinh(x) (sinh(*(x)))
230 #define d_sqrt(x) (sqrt(*(x)))
231 #define d_tan(x) (tan(*(x)))
232 #define d_tanh(x) (tanh(*(x)))
233 #define i_abs(x) abs(*(x))
234 #define i_dnnt(x) ((integer)u_nint(*(x)))
/* LEN of a character argument: f2c passes the length separately as n. */
235 #define i_len(s, n) (n)
236 #define i_nint(x) ((integer)u_nint(*(x)))
237 #define i_sign(a,b) ((integer)u_sign((integer)*(a),(integer)*(b)))
/* Power operators; *pow_ui helpers are defined later in this file. */
238 #define pow_dd(ap, bp) ( pow(*(ap), *(bp)))
239 #define pow_si(B,E) spow_ui(*(B),*(E))
240 #define pow_ri(B,E) spow_ui(*(B),*(E))
241 #define pow_di(B,E) dpow_ui(*(B),*(E))
242 #define pow_zi(p, a, b) {pCd(p) = zpow_ui(Cd(a), *(b));}
243 #define pow_ci(p, a, b) {pCf(p) = cpow_ui(Cf(a), *(b));}
/* pow_zz: Fortran Z**Z, *(R) = *(A) ** *(B) in double complex.
 * BUGFIX(review): the previous expansion passed the f2c struct *(B) directly
 * to cpow(), which expects a native _Complex double — a type error whenever
 * the macro is expanded (it only survived because this file never uses it).
 * Convert B the same way as A, via Cd(). */
#define pow_zz(R,A,B) {pCd(R) = cpow(Cd(A),Cd(B));}
/* Fortran string concatenation: copy each right-hand piece into lpp up to
 * llp bytes, then blank-pad the remainder (Fortran fixed-length semantics). */
245 #define s_cat(lpp, rpp, rnp, np, llp) { ftnlen i, nc, ll; char *f__rp, *lp; ll = (llp); lp = (lpp); for(i=0; i < (int)*(np); ++i) { nc = ll; if((rnp)[i] < nc) nc = (rnp)[i]; ll -= nc; f__rp = (rpp)[i]; while(--nc >= 0) *lp++ = *(f__rp)++; } while(--ll >= 0) *lp++ = ' '; }
246 #define s_cmp(a,b,c,d) ((integer)strncmp((a),(b),f2cmin((c),(d))))
/* NOTE(review): unlike Fortran assignment, this s_copy stops at a NUL in B
 * and does not blank-pad A to length C — adequate for the short option
 * strings used here; verify before reusing elsewhere. */
247 #define s_copy(A,B,C,D) { int __i,__m; for (__i=0, __m=f2cmin((C),(D)); __i<__m && (B)[__i] != 0; ++__i) (A)[__i] = (B)[__i]; }
/* Error/stop handlers reduced to bare exits; the message argument is ignored. */
248 #define sig_die(s, kill) { exit(1); }
249 #define s_stop(s, n) {exit(0);}
/* SCCS what-string identifying the libf77 version this shim emulates. */
250 static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n";
/* Double-precision complex CABS / CEXP / CSQRT. */
251 #define z_abs(z) (cabs(Cd(z)))
252 #define z_exp(R, Z) {pCd(R) = cexp(Cd(Z));}
253 #define z_sqrt(R, Z) {pCd(R) = csqrt(Cd(Z));}
/* Loop-control shims used by the translated Fortran (EXIT/CYCLE). */
254 #define myexit_() break;
255 #define mycycle() continue;
/* NOTE(review): braces make these statement blocks, not expressions —
 * usable only where the translator emits them as full statements. */
256 #define myceiling(w) {ceil(w)}
257 #define myhuge(w) {HUGE_VAL}
258 //#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);}
259 #define mymaxloc(w,s,e,n) {dmaxloc_(w,*(s),*(e),n)}
261 /* procedure parameter types for -A and -C++ */
263 #define F2C_proc_par_types 1
/* NOTE(review): the two L_fp typedefs are the C++/ANSI vs K&R branches of an
 * elided #ifdef — only one is active in the real header. */
265 typedef logical (*L_fp)(...);
267 typedef logical (*L_fp)();
/* Integer-power helpers: x**n by binary exponentiation (square-and-multiply).
 * Negative exponents are handled by inverting x first.
 * NOTE(review): the loop/return lines of these functions are elided in this
 * excerpt; only the headers and selected body lines are visible. */
270 static float spow_ui(float x, integer n) {
271 float pow=1.0; unsigned long int u;
273 if(n < 0) n = -n, x = 1/x;
/* double variant of the same square-and-multiply scheme. */
282 static double dpow_ui(double x, integer n) {
283 double pow=1.0; unsigned long int u;
285 if(n < 0) n = -n, x = 1/x;
/* MSVC float-complex variant, using _FCmulcc for complex products.
 * NOTE(review): the n<0 path takes component-wise reciprocals, which is not
 * the complex reciprocal 1/x — pre-existing upstream behavior, flagged. */
295 static _Fcomplex cpow_ui(_Fcomplex x, integer n) {
296 _Fcomplex pow={1.0,0.0}; complex tmp; unsigned long int u;
298 if(n < 0) n = -n, x._Val[0] = 1./x._Val[0], x._Val[1]=1./x._Val[1];
300 if(u & 01) pow = _FCmulcc(pow,x);
301 if(u >>= 1) x = _FCmulcc(x,x);
/* C99 float-complex variant; 1/x here is true complex reciprocal. */
308 static _Complex float cpow_ui(_Complex float x, integer n) {
309 _Complex float pow=1.0; unsigned long int u;
311 if(n < 0) n = -n, x = 1/x;
/* MSVC double-complex variant (same component-wise reciprocal caveat). */
322 static _Dcomplex zpow_ui(_Dcomplex x, integer n) {
323 _Dcomplex pow={1.0,0.0}; unsigned long int u;
325 if(n < 0) n = -n, x._Val[0] = 1/x._Val[0], x._Val[1] =1/x._Val[1];
327 if(u & 01) pow = _Cmulcc(pow,x);
328 if(u >>= 1) x = _Cmulcc(x,x);
/* C99 double-complex variant. */
335 static _Complex double zpow_ui(_Complex double x, integer n) {
336 _Complex double pow=1.0; unsigned long int u;
338 if(n < 0) n = -n, x = 1/x;
/* Integer power. Special cases follow classic f2c pow_ii: x==0 with n!=0
 * deliberately evaluates 1/x (division by zero) to signal the error. */
348 static integer pow_ii(integer x, integer n) {
349 integer pow; unsigned long int u;
351 if (n == 0 || x == 1) pow = 1;
352 else if (x != -1) pow = x == 0 ? 1/x : 0;
355 if ((n > 0) || !(n == 0 || x == 1 || x != -1)) {
/* dmaxloc_/smaxloc_: 1-based index of the first maximum of w[s..e]
 * (Fortran MAXLOC). The n parameter is unused in the visible lines.
 * NOTE(review): the return statements are elided in this excerpt;
 * presumably each returns mi — confirm in the full file. */
365 static integer dmaxloc_(double *w, integer s, integer e, integer *n)
367 double m; integer i, mi;
368 for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
369 if (w[i-1]>m) mi=i ,m=w[i-1];
/* float variant, identical scan. */
372 static integer smaxloc_(float *w, integer s, integer e, integer *n)
374 float m; integer i, mi;
375 for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
376 if (w[i-1]>m) mi=i ,m=w[i-1];
/* BLAS-style dot products inlined for the translated Fortran:
 * cdotc_/zdotc_ accumulate conj(x(i))*y(i); cdotu_/zdotu_ accumulate
 * x(i)*y(i), each with a fast unit-stride path and a general-stride path.
 * NOTE(review): each function appears twice (MSVC _Fcomplex/_Dcomplex vs
 * C99 _Complex branches of an elided #ifdef), and the closing braces and
 * the store of the accumulator into *z are elided in this excerpt.
 * NOTE(review): the MSVC branches accumulate real and imaginary parts
 * component-wise (re*re, im*im), which is not the complex product used by
 * the C99 branches — pre-existing upstream discrepancy, flagged. */
379 static inline void cdotc_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
380 integer n = *n_, incx = *incx_, incy = *incy_, i;
382 _Fcomplex zdotc = {0.0, 0.0};
383 if (incx == 1 && incy == 1) {
384 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
385 zdotc._Val[0] += conjf(Cf(&x[i]))._Val[0] * Cf(&y[i])._Val[0];
386 zdotc._Val[1] += conjf(Cf(&x[i]))._Val[1] * Cf(&y[i])._Val[1];
389 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
390 zdotc._Val[0] += conjf(Cf(&x[i*incx]))._Val[0] * Cf(&y[i*incy])._Val[0];
391 zdotc._Val[1] += conjf(Cf(&x[i*incx]))._Val[1] * Cf(&y[i*incy])._Val[1];
397 _Complex float zdotc = 0.0;
398 if (incx == 1 && incy == 1) {
399 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
400 zdotc += conjf(Cf(&x[i])) * Cf(&y[i]);
403 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
404 zdotc += conjf(Cf(&x[i*incx])) * Cf(&y[i*incy]);
/* double-complex conjugated dot product. */
410 static inline void zdotc_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
411 integer n = *n_, incx = *incx_, incy = *incy_, i;
413 _Dcomplex zdotc = {0.0, 0.0};
414 if (incx == 1 && incy == 1) {
415 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
416 zdotc._Val[0] += conj(Cd(&x[i]))._Val[0] * Cd(&y[i])._Val[0];
417 zdotc._Val[1] += conj(Cd(&x[i]))._Val[1] * Cd(&y[i])._Val[1];
420 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
421 zdotc._Val[0] += conj(Cd(&x[i*incx]))._Val[0] * Cd(&y[i*incy])._Val[0];
422 zdotc._Val[1] += conj(Cd(&x[i*incx]))._Val[1] * Cd(&y[i*incy])._Val[1];
428 _Complex double zdotc = 0.0;
429 if (incx == 1 && incy == 1) {
430 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
431 zdotc += conj(Cd(&x[i])) * Cd(&y[i]);
434 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
435 zdotc += conj(Cd(&x[i*incx])) * Cd(&y[i*incy]);
/* unconjugated float-complex dot product (CDOTU). */
441 static inline void cdotu_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
442 integer n = *n_, incx = *incx_, incy = *incy_, i;
444 _Fcomplex zdotc = {0.0, 0.0};
445 if (incx == 1 && incy == 1) {
446 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
447 zdotc._Val[0] += Cf(&x[i])._Val[0] * Cf(&y[i])._Val[0];
448 zdotc._Val[1] += Cf(&x[i])._Val[1] * Cf(&y[i])._Val[1];
451 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
452 zdotc._Val[0] += Cf(&x[i*incx])._Val[0] * Cf(&y[i*incy])._Val[0];
453 zdotc._Val[1] += Cf(&x[i*incx])._Val[1] * Cf(&y[i*incy])._Val[1];
459 _Complex float zdotc = 0.0;
460 if (incx == 1 && incy == 1) {
461 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
462 zdotc += Cf(&x[i]) * Cf(&y[i]);
465 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
466 zdotc += Cf(&x[i*incx]) * Cf(&y[i*incy]);
/* unconjugated double-complex dot product (ZDOTU). */
472 static inline void zdotu_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
473 integer n = *n_, incx = *incx_, incy = *incy_, i;
475 _Dcomplex zdotc = {0.0, 0.0};
476 if (incx == 1 && incy == 1) {
477 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
478 zdotc._Val[0] += Cd(&x[i])._Val[0] * Cd(&y[i])._Val[0];
479 zdotc._Val[1] += Cd(&x[i])._Val[1] * Cd(&y[i])._Val[1];
482 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
483 zdotc._Val[0] += Cd(&x[i*incx])._Val[0] * Cd(&y[i*incy])._Val[0];
484 zdotc._Val[1] += Cd(&x[i*incx])._Val[1] * Cd(&y[i*incy])._Val[1];
490 _Complex double zdotc = 0.0;
491 if (incx == 1 && incy == 1) {
492 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
493 zdotc += Cd(&x[i]) * Cd(&y[i]);
496 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
497 zdotc += Cd(&x[i*incx]) * Cd(&y[i*incy]);
503 /* -- translated by f2c (version 20000121).
504 You must link the resulting object file with the libraries:
505 -lf2c -lm (in that order)
511 /* Table of constant values */
/* Complex zero and one, passed by address to claset_ etc.; c__1/c__2 are
 * the integer constants f2c materializes for pass-by-reference arguments. */
513 static complex c_b1 = {0.f,0.f};
514 static complex c_b2 = {1.f,0.f};
515 static integer c__1 = 1;
516 static integer c__2 = 2;
518 /* > \brief \b CHGEQZ */
520 /* =========== DOCUMENTATION =========== */
522 /* Online html documentation available at */
523 /* http://www.netlib.org/lapack/explore-html/ */
526 /* > Download CHGEQZ + dependencies */
527 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/chgeqz.
530 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/chgeqz.
533 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/chgeqz.
541 /* SUBROUTINE CHGEQZ( JOB, COMPQ, COMPZ, N, ILO, IHI, H, LDH, T, LDT, */
542 /* ALPHA, BETA, Q, LDQ, Z, LDZ, WORK, LWORK, */
545 /* CHARACTER COMPQ, COMPZ, JOB */
546 /* INTEGER IHI, ILO, INFO, LDH, LDQ, LDT, LDZ, LWORK, N */
547 /* REAL RWORK( * ) */
548 /* COMPLEX ALPHA( * ), BETA( * ), H( LDH, * ), */
549 /* $ Q( LDQ, * ), T( LDT, * ), WORK( * ), */
553 /* > \par Purpose: */
558 /* > CHGEQZ computes the eigenvalues of a complex matrix pair (H,T), */
559 /* > where H is an upper Hessenberg matrix and T is upper triangular, */
560 /* > using the single-shift QZ method. */
561 /* > Matrix pairs of this type are produced by the reduction to */
562 /* > generalized upper Hessenberg form of a complex matrix pair (A,B): */
564 /* > A = Q1*H*Z1**H, B = Q1*T*Z1**H, */
566 /* > as computed by CGGHRD. */
568 /* > If JOB='S', then the Hessenberg-triangular pair (H,T) is */
569 /* > also reduced to generalized Schur form, */
571 /* > H = Q*S*Z**H, T = Q*P*Z**H, */
573 /* > where Q and Z are unitary matrices and S and P are upper triangular. */
575 /* > Optionally, the unitary matrix Q from the generalized Schur */
576 /* > factorization may be postmultiplied into an input matrix Q1, and the */
577 /* > unitary matrix Z may be postmultiplied into an input matrix Z1. */
578 /* > If Q1 and Z1 are the unitary matrices from CGGHRD that reduced */
579 /* > the matrix pair (A,B) to generalized Hessenberg form, then the output */
580 /* > matrices Q1*Q and Z1*Z are the unitary factors from the generalized */
581 /* > Schur factorization of (A,B): */
583 /* > A = (Q1*Q)*S*(Z1*Z)**H, B = (Q1*Q)*P*(Z1*Z)**H. */
585 /* > To avoid overflow, eigenvalues of the matrix pair (H,T) */
586 /* > (equivalently, of (A,B)) are computed as a pair of complex values */
587 /* > (alpha,beta). If beta is nonzero, lambda = alpha / beta is an */
588 /* > eigenvalue of the generalized nonsymmetric eigenvalue problem (GNEP) */
589 /* > A*x = lambda*B*x */
590 /* > and if alpha is nonzero, mu = beta / alpha is an eigenvalue of the */
591 /* > alternate form of the GNEP */
592 /* > mu*A*y = B*y. */
593 /* > The values of alpha and beta for the i-th eigenvalue can be read */
594 /* > directly from the generalized Schur form: alpha = S(i,i), */
595 /* > beta = P(i,i). */
597 /* > Ref: C.B. Moler & G.W. Stewart, "An Algorithm for Generalized Matrix */
598 /* > Eigenvalue Problems", SIAM J. Numer. Anal., 10(1973), */
599 /* > pp. 241--256. */
605 /* > \param[in] JOB */
607 /* > JOB is CHARACTER*1 */
608 /* > = 'E': Compute eigenvalues only; */
609 /* > = 'S': Compute eigenvalues and the Schur form. */
612 /* > \param[in] COMPQ */
614 /* > COMPQ is CHARACTER*1 */
615 /* > = 'N': Left Schur vectors (Q) are not computed; */
616 /* > = 'I': Q is initialized to the unit matrix and the matrix Q */
617 /* > of left Schur vectors of (H,T) is returned; */
618 /* > = 'V': Q must contain a unitary matrix Q1 on entry and */
619 /* > the product Q1*Q is returned. */
622 /* > \param[in] COMPZ */
624 /* > COMPZ is CHARACTER*1 */
625 /* > = 'N': Right Schur vectors (Z) are not computed; */
626 /* > = 'I': Z is initialized to the unit matrix and the matrix Z */
627 /* > of right Schur vectors of (H,T) is returned; */
628 /* > = 'V': Z must contain a unitary matrix Z1 on entry and */
629 /* > the product Z1*Z is returned. */
635 /* > The order of the matrices H, T, Q, and Z. N >= 0. */
638 /* > \param[in] ILO */
640 /* > ILO is INTEGER */
643 /* > \param[in] IHI */
645 /* > IHI is INTEGER */
646 /* > ILO and IHI mark the rows and columns of H which are in */
647 /* > Hessenberg form. It is assumed that A is already upper */
648 /* > triangular in rows and columns 1:ILO-1 and IHI+1:N. */
649 /* > If N > 0, 1 <= ILO <= IHI <= N; if N = 0, ILO=1 and IHI=0. */
652 /* > \param[in,out] H */
654 /* > H is COMPLEX array, dimension (LDH, N) */
655 /* > On entry, the N-by-N upper Hessenberg matrix H. */
656 /* > On exit, if JOB = 'S', H contains the upper triangular */
657 /* > matrix S from the generalized Schur factorization. */
658 /* > If JOB = 'E', the diagonal of H matches that of S, but */
659 /* > the rest of H is unspecified. */
662 /* > \param[in] LDH */
664 /* > LDH is INTEGER */
665 /* > The leading dimension of the array H. LDH >= f2cmax( 1, N ). */
668 /* > \param[in,out] T */
670 /* > T is COMPLEX array, dimension (LDT, N) */
671 /* > On entry, the N-by-N upper triangular matrix T. */
672 /* > On exit, if JOB = 'S', T contains the upper triangular */
673 /* > matrix P from the generalized Schur factorization. */
674 /* > If JOB = 'E', the diagonal of T matches that of P, but */
675 /* > the rest of T is unspecified. */
678 /* > \param[in] LDT */
680 /* > LDT is INTEGER */
681 /* > The leading dimension of the array T. LDT >= f2cmax( 1, N ). */
684 /* > \param[out] ALPHA */
686 /* > ALPHA is COMPLEX array, dimension (N) */
687 /* > The complex scalars alpha that define the eigenvalues of */
688 /* > GNEP. ALPHA(i) = S(i,i) in the generalized Schur */
689 /* > factorization. */
692 /* > \param[out] BETA */
694 /* > BETA is COMPLEX array, dimension (N) */
695 /* > The real non-negative scalars beta that define the */
696 /* > eigenvalues of GNEP. BETA(i) = P(i,i) in the generalized */
697 /* > Schur factorization. */
699 /* > Together, the quantities alpha = ALPHA(j) and beta = BETA(j) */
700 /* > represent the j-th eigenvalue of the matrix pair (A,B), in */
701 /* > one of the forms lambda = alpha/beta or mu = beta/alpha. */
702 /* > Since either lambda or mu may overflow, they should not, */
703 /* > in general, be computed. */
706 /* > \param[in,out] Q */
708 /* > Q is COMPLEX array, dimension (LDQ, N) */
709 /* > On entry, if COMPQ = 'V', the unitary matrix Q1 used in the */
710 /* > reduction of (A,B) to generalized Hessenberg form. */
711 /* > On exit, if COMPQ = 'I', the unitary matrix of left Schur */
712 /* > vectors of (H,T), and if COMPQ = 'V', the unitary matrix of */
713 /* > left Schur vectors of (A,B). */
714 /* > Not referenced if COMPQ = 'N'. */
717 /* > \param[in] LDQ */
719 /* > LDQ is INTEGER */
720 /* > The leading dimension of the array Q. LDQ >= 1. */
721 /* > If COMPQ='V' or 'I', then LDQ >= N. */
724 /* > \param[in,out] Z */
726 /* > Z is COMPLEX array, dimension (LDZ, N) */
727 /* > On entry, if COMPZ = 'V', the unitary matrix Z1 used in the */
728 /* > reduction of (A,B) to generalized Hessenberg form. */
729 /* > On exit, if COMPZ = 'I', the unitary matrix of right Schur */
730 /* > vectors of (H,T), and if COMPZ = 'V', the unitary matrix of */
731 /* > right Schur vectors of (A,B). */
732 /* > Not referenced if COMPZ = 'N'. */
735 /* > \param[in] LDZ */
737 /* > LDZ is INTEGER */
738 /* > The leading dimension of the array Z. LDZ >= 1. */
739 /* > If COMPZ='V' or 'I', then LDZ >= N. */
742 /* > \param[out] WORK */
744 /* > WORK is COMPLEX array, dimension (MAX(1,LWORK)) */
745 /* > On exit, if INFO >= 0, WORK(1) returns the optimal LWORK. */
748 /* > \param[in] LWORK */
750 /* > LWORK is INTEGER */
751 /* > The dimension of the array WORK. LWORK >= f2cmax(1,N). */
753 /* > If LWORK = -1, then a workspace query is assumed; the routine */
754 /* > only calculates the optimal size of the WORK array, returns */
755 /* > this value as the first entry of the WORK array, and no error */
756 /* > message related to LWORK is issued by XERBLA. */
759 /* > \param[out] RWORK */
761 /* > RWORK is REAL array, dimension (N) */
764 /* > \param[out] INFO */
766 /* > INFO is INTEGER */
767 /* > = 0: successful exit */
768 /* > < 0: if INFO = -i, the i-th argument had an illegal value */
769 /* > = 1,...,N: the QZ iteration did not converge. (H,T) is not */
770 /* > in Schur form, but ALPHA(i) and BETA(i), */
771 /* > i=INFO+1,...,N should be correct. */
772 /* > = N+1,...,2*N: the shift calculation failed. (H,T) is not */
773 /* > in Schur form, but ALPHA(i) and BETA(i), */
774 /* > i=INFO-N+1,...,N should be correct. */
780 /* > \author Univ. of Tennessee */
781 /* > \author Univ. of California Berkeley */
782 /* > \author Univ. of Colorado Denver */
783 /* > \author NAG Ltd. */
785 /* > \date April 2012 */
787 /* > \ingroup complexGEcomputational */
789 /* > \par Further Details: */
790 /* ===================== */
794 /* > We assume that complex ABS works as long as its value is less than */
798 /* ===================================================================== */
799 /* Subroutine */ int chgeqz_(char *job, char *compq, char *compz, integer *n,
800 integer *ilo, integer *ihi, complex *h__, integer *ldh, complex *t,
801 integer *ldt, complex *alpha, complex *beta, complex *q, integer *ldq,
802 complex *z__, integer *ldz, complex *work, integer *lwork, real *
803 rwork, integer *info)
805 /* System generated locals */
806 integer h_dim1, h_offset, q_dim1, q_offset, t_dim1, t_offset, z_dim1,
807 z_offset, i__1, i__2, i__3, i__4, i__5, i__6;
808 real r__1, r__2, r__3, r__4, r__5, r__6;
809 complex q__1, q__2, q__3, q__4, q__5, q__6, q__7;
811 /* Local variables */
812 real absb, atol, btol, temp;
813 extern /* Subroutine */ int crot_(integer *, complex *, integer *,
814 complex *, integer *, real *, complex *);
818 extern /* Subroutine */ int cscal_(integer *, complex *, complex *,
821 extern logical lsame_(char *, char *);
823 integer iiter, ilast, jiter;
828 complex ctemp2, ctemp3;
834 extern /* Complex */ VOID cladiv_(complex *, complex *, complex *);
836 extern real slamch_(char *), clanhs_(char *, integer *, complex *,
838 extern /* Subroutine */ int claset_(char *, integer *, integer *, complex
839 *, complex *, complex *, integer *), clartg_(complex *,
840 complex *, real *, complex *, complex *);
842 extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen);
845 integer icompq, ilastm, ischur;
847 integer icompz, ifirst, ifrstm, istart;
849 complex ad11, ad12, ad21, ad22;
853 complex abi12, abi22;
856 /* -- LAPACK computational routine (version 3.7.0) -- */
857 /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
858 /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
862 /* ===================================================================== */
865 /* Decode JOB, COMPQ, COMPZ */
867 /* Parameter adjustments */
869 h_offset = 1 + h_dim1 * 1;
872 t_offset = 1 + t_dim1 * 1;
877 q_offset = 1 + q_dim1 * 1;
880 z_offset = 1 + z_dim1 * 1;
886 if (lsame_(job, "E")) {
889 } else if (lsame_(job, "S")) {
897 if (lsame_(compq, "N")) {
900 } else if (lsame_(compq, "V")) {
903 } else if (lsame_(compq, "I")) {
911 if (lsame_(compz, "N")) {
914 } else if (lsame_(compz, "V")) {
917 } else if (lsame_(compz, "I")) {
925 /* Check Argument Values */
929 work[1].r = (real) i__1, work[1].i = 0.f;
930 lquery = *lwork == -1;
933 } else if (icompq == 0) {
935 } else if (icompz == 0) {
939 } else if (*ilo < 1) {
941 } else if (*ihi > *n || *ihi < *ilo - 1) {
943 } else if (*ldh < *n) {
945 } else if (*ldt < *n) {
947 } else if (*ldq < 1 || ilq && *ldq < *n) {
949 } else if (*ldz < 1 || ilz && *ldz < *n) {
951 } else if (*lwork < f2cmax(1,*n) && ! lquery) {
956 xerbla_("CHGEQZ", &i__1, (ftnlen)6);
962 /* Quick return if possible */
964 /* WORK( 1 ) = CMPLX( 1 ) */
966 work[1].r = 1.f, work[1].i = 0.f;
970 /* Initialize Q and Z */
973 claset_("Full", n, n, &c_b1, &c_b2, &q[q_offset], ldq);
976 claset_("Full", n, n, &c_b1, &c_b2, &z__[z_offset], ldz);
979 /* Machine Constants */
981 in = *ihi + 1 - *ilo;
982 safmin = slamch_("S");
983 ulp = slamch_("E") * slamch_("B");
984 anorm = clanhs_("F", &in, &h__[*ilo + *ilo * h_dim1], ldh, &rwork[1]);
985 bnorm = clanhs_("F", &in, &t[*ilo + *ilo * t_dim1], ldt, &rwork[1]);
987 r__1 = safmin, r__2 = ulp * anorm;
988 atol = f2cmax(r__1,r__2);
990 r__1 = safmin, r__2 = ulp * bnorm;
991 btol = f2cmax(r__1,r__2);
992 ascale = 1.f / f2cmax(safmin,anorm);
993 bscale = 1.f / f2cmax(safmin,bnorm);
996 /* Set Eigenvalues IHI+1:N */
999 for (j = *ihi + 1; j <= i__1; ++j) {
1000 absb = c_abs(&t[j + j * t_dim1]);
1001 if (absb > safmin) {
1002 i__2 = j + j * t_dim1;
1003 q__2.r = t[i__2].r / absb, q__2.i = t[i__2].i / absb;
1004 r_cnjg(&q__1, &q__2);
1005 signbc.r = q__1.r, signbc.i = q__1.i;
1006 i__2 = j + j * t_dim1;
1007 t[i__2].r = absb, t[i__2].i = 0.f;
1010 cscal_(&i__2, &signbc, &t[j * t_dim1 + 1], &c__1);
1011 cscal_(&j, &signbc, &h__[j * h_dim1 + 1], &c__1);
1013 cscal_(&c__1, &signbc, &h__[j + j * h_dim1], &c__1);
1016 cscal_(n, &signbc, &z__[j * z_dim1 + 1], &c__1);
1019 i__2 = j + j * t_dim1;
1020 t[i__2].r = 0.f, t[i__2].i = 0.f;
1023 i__3 = j + j * h_dim1;
1024 alpha[i__2].r = h__[i__3].r, alpha[i__2].i = h__[i__3].i;
1026 i__3 = j + j * t_dim1;
1027 beta[i__2].r = t[i__3].r, beta[i__2].i = t[i__3].i;
1031 /* If IHI < ILO, skip QZ steps */
1037 /* MAIN QZ ITERATION LOOP */
1039 /* Initialize dynamic indices */
1041 /* Eigenvalues ILAST+1:N have been found. */
1042 /* Column operations modify rows IFRSTM:whatever */
1043 /* Row operations modify columns whatever:ILASTM */
1045 /* If only eigenvalues are being computed, then */
1046 /* IFRSTM is the row of the last splitting row above row ILAST; */
1047 /* this is always at least ILO. */
1048 /* IITER counts iterations since the last eigenvalue was found, */
1049 /* to tell when to use an extraordinary shift. */
1050 /* MAXIT is the maximum number of QZ sweeps allowed. */
1061 eshift.r = 0.f, eshift.i = 0.f;
1062 maxit = (*ihi - *ilo + 1) * 30;
1065 for (jiter = 1; jiter <= i__1; ++jiter) {
1067 /* Check for too many iterations. */
1069 if (jiter > maxit) {
1073 /* Split the matrix if possible. */
1076 /* 1: H(j,j-1)=0 or j=ILO */
1079 /* Special case: j=ILAST */
1081 if (ilast == *ilo) {
1084 i__2 = ilast + (ilast - 1) * h_dim1;
1085 if ((r__1 = h__[i__2].r, abs(r__1)) + (r__2 = r_imag(&h__[ilast +
1086 (ilast - 1) * h_dim1]), abs(r__2)) <= atol) {
1087 i__2 = ilast + (ilast - 1) * h_dim1;
1088 h__[i__2].r = 0.f, h__[i__2].i = 0.f;
1093 if (c_abs(&t[ilast + ilast * t_dim1]) <= btol) {
1094 i__2 = ilast + ilast * t_dim1;
1095 t[i__2].r = 0.f, t[i__2].i = 0.f;
1099 /* General case: j<ILAST */
1102 for (j = ilast - 1; j >= i__2; --j) {
1104 /* Test 1: for H(j,j-1)=0 or j=ILO */
1109 i__3 = j + (j - 1) * h_dim1;
1110 if ((r__1 = h__[i__3].r, abs(r__1)) + (r__2 = r_imag(&h__[j +
1111 (j - 1) * h_dim1]), abs(r__2)) <= atol) {
1112 i__3 = j + (j - 1) * h_dim1;
1113 h__[i__3].r = 0.f, h__[i__3].i = 0.f;
1120 /* Test 2: for T(j,j)=0 */
1122 if (c_abs(&t[j + j * t_dim1]) < btol) {
1123 i__3 = j + j * t_dim1;
1124 t[i__3].r = 0.f, t[i__3].i = 0.f;
1126 /* Test 1a: Check for 2 consecutive small subdiagonals in A */
1130 i__3 = j + (j - 1) * h_dim1;
1131 i__4 = j + 1 + j * h_dim1;
1132 i__5 = j + j * h_dim1;
1133 if (((r__1 = h__[i__3].r, abs(r__1)) + (r__2 = r_imag(&
1134 h__[j + (j - 1) * h_dim1]), abs(r__2))) * (ascale
1135 * ((r__3 = h__[i__4].r, abs(r__3)) + (r__4 =
1136 r_imag(&h__[j + 1 + j * h_dim1]), abs(r__4)))) <=
1137 ((r__5 = h__[i__5].r, abs(r__5)) + (r__6 = r_imag(
1138 &h__[j + j * h_dim1]), abs(r__6))) * (ascale *
1144 /* If both tests pass (1 & 2), i.e., the leading diagonal */
1145 /* element of B in the block is zero, split a 1x1 block off */
1146 /* at the top. (I.e., at the J-th row/column) The leading */
1147 /* diagonal element of the remainder can also be zero, so */
1148 /* this may have to be done repeatedly. */
1150 if (ilazro || ilazr2) {
1152 for (jch = j; jch <= i__3; ++jch) {
1153 i__4 = jch + jch * h_dim1;
1154 ctemp.r = h__[i__4].r, ctemp.i = h__[i__4].i;
1155 clartg_(&ctemp, &h__[jch + 1 + jch * h_dim1], &c__, &
1156 s, &h__[jch + jch * h_dim1]);
1157 i__4 = jch + 1 + jch * h_dim1;
1158 h__[i__4].r = 0.f, h__[i__4].i = 0.f;
1159 i__4 = ilastm - jch;
1160 crot_(&i__4, &h__[jch + (jch + 1) * h_dim1], ldh, &
1161 h__[jch + 1 + (jch + 1) * h_dim1], ldh, &c__,
1163 i__4 = ilastm - jch;
1164 crot_(&i__4, &t[jch + (jch + 1) * t_dim1], ldt, &t[
1165 jch + 1 + (jch + 1) * t_dim1], ldt, &c__, &s);
1168 crot_(n, &q[jch * q_dim1 + 1], &c__1, &q[(jch + 1)
1169 * q_dim1 + 1], &c__1, &c__, &q__1);
1172 i__4 = jch + (jch - 1) * h_dim1;
1173 i__5 = jch + (jch - 1) * h_dim1;
1174 q__1.r = c__ * h__[i__5].r, q__1.i = c__ * h__[
1176 h__[i__4].r = q__1.r, h__[i__4].i = q__1.i;
1179 i__4 = jch + 1 + (jch + 1) * t_dim1;
1180 if ((r__1 = t[i__4].r, abs(r__1)) + (r__2 = r_imag(&t[
1181 jch + 1 + (jch + 1) * t_dim1]), abs(r__2)) >=
1183 if (jch + 1 >= ilast) {
1190 i__4 = jch + 1 + (jch + 1) * t_dim1;
1191 t[i__4].r = 0.f, t[i__4].i = 0.f;
1197 /* Only test 2 passed -- chase the zero to T(ILAST,ILAST) */
1198 /* Then process as in the case T(ILAST,ILAST)=0 */
1201 for (jch = j; jch <= i__3; ++jch) {
1202 i__4 = jch + (jch + 1) * t_dim1;
1203 ctemp.r = t[i__4].r, ctemp.i = t[i__4].i;
1204 clartg_(&ctemp, &t[jch + 1 + (jch + 1) * t_dim1], &
1205 c__, &s, &t[jch + (jch + 1) * t_dim1]);
1206 i__4 = jch + 1 + (jch + 1) * t_dim1;
1207 t[i__4].r = 0.f, t[i__4].i = 0.f;
1208 if (jch < ilastm - 1) {
1209 i__4 = ilastm - jch - 1;
1210 crot_(&i__4, &t[jch + (jch + 2) * t_dim1], ldt, &
1211 t[jch + 1 + (jch + 2) * t_dim1], ldt, &
1214 i__4 = ilastm - jch + 2;
1215 crot_(&i__4, &h__[jch + (jch - 1) * h_dim1], ldh, &
1216 h__[jch + 1 + (jch - 1) * h_dim1], ldh, &c__,
1220 crot_(n, &q[jch * q_dim1 + 1], &c__1, &q[(jch + 1)
1221 * q_dim1 + 1], &c__1, &c__, &q__1);
1223 i__4 = jch + 1 + jch * h_dim1;
1224 ctemp.r = h__[i__4].r, ctemp.i = h__[i__4].i;
1225 clartg_(&ctemp, &h__[jch + 1 + (jch - 1) * h_dim1], &
1226 c__, &s, &h__[jch + 1 + jch * h_dim1]);
1227 i__4 = jch + 1 + (jch - 1) * h_dim1;
1228 h__[i__4].r = 0.f, h__[i__4].i = 0.f;
1229 i__4 = jch + 1 - ifrstm;
1230 crot_(&i__4, &h__[ifrstm + jch * h_dim1], &c__1, &h__[
1231 ifrstm + (jch - 1) * h_dim1], &c__1, &c__, &s)
1233 i__4 = jch - ifrstm;
1234 crot_(&i__4, &t[ifrstm + jch * t_dim1], &c__1, &t[
1235 ifrstm + (jch - 1) * t_dim1], &c__1, &c__, &s)
1238 crot_(n, &z__[jch * z_dim1 + 1], &c__1, &z__[(jch
1239 - 1) * z_dim1 + 1], &c__1, &c__, &s);
1245 } else if (ilazro) {
1247 /* Only test 1 passed -- work on J:ILAST */
1253 /* Neither test passed -- try next J */
1258 /* (Drop-through is "impossible") */
1260 *info = (*n << 1) + 1;
1263 /* T(ILAST,ILAST)=0 -- clear H(ILAST,ILAST-1) to split off a */
1267 i__2 = ilast + ilast * h_dim1;
1268 ctemp.r = h__[i__2].r, ctemp.i = h__[i__2].i;
1269 clartg_(&ctemp, &h__[ilast + (ilast - 1) * h_dim1], &c__, &s, &h__[
1270 ilast + ilast * h_dim1]);
1271 i__2 = ilast + (ilast - 1) * h_dim1;
1272 h__[i__2].r = 0.f, h__[i__2].i = 0.f;
1273 i__2 = ilast - ifrstm;
1274 crot_(&i__2, &h__[ifrstm + ilast * h_dim1], &c__1, &h__[ifrstm + (
1275 ilast - 1) * h_dim1], &c__1, &c__, &s);
1276 i__2 = ilast - ifrstm;
1277 crot_(&i__2, &t[ifrstm + ilast * t_dim1], &c__1, &t[ifrstm + (ilast -
1278 1) * t_dim1], &c__1, &c__, &s);
1280 crot_(n, &z__[ilast * z_dim1 + 1], &c__1, &z__[(ilast - 1) *
1281 z_dim1 + 1], &c__1, &c__, &s);
1284 /* H(ILAST,ILAST-1)=0 -- Standardize B, set ALPHA and BETA */
1287 absb = c_abs(&t[ilast + ilast * t_dim1]);
1288 if (absb > safmin) {
1289 i__2 = ilast + ilast * t_dim1;
1290 q__2.r = t[i__2].r / absb, q__2.i = t[i__2].i / absb;
1291 r_cnjg(&q__1, &q__2);
1292 signbc.r = q__1.r, signbc.i = q__1.i;
1293 i__2 = ilast + ilast * t_dim1;
1294 t[i__2].r = absb, t[i__2].i = 0.f;
1296 i__2 = ilast - ifrstm;
1297 cscal_(&i__2, &signbc, &t[ifrstm + ilast * t_dim1], &c__1);
1298 i__2 = ilast + 1 - ifrstm;
1299 cscal_(&i__2, &signbc, &h__[ifrstm + ilast * h_dim1], &c__1);
1301 cscal_(&c__1, &signbc, &h__[ilast + ilast * h_dim1], &c__1);
1304 cscal_(n, &signbc, &z__[ilast * z_dim1 + 1], &c__1);
1307 i__2 = ilast + ilast * t_dim1;
1308 t[i__2].r = 0.f, t[i__2].i = 0.f;
1311 i__3 = ilast + ilast * h_dim1;
1312 alpha[i__2].r = h__[i__3].r, alpha[i__2].i = h__[i__3].i;
1314 i__3 = ilast + ilast * t_dim1;
1315 beta[i__2].r = t[i__3].r, beta[i__2].i = t[i__3].i;
1317 /* Go to next block -- exit if finished. */
1324 /* Reset counters */
1327 eshift.r = 0.f, eshift.i = 0.f;
1330 if (ifrstm > ilast) {
1338 /* This iteration only involves rows/columns IFIRST:ILAST. We */
1339 /* assume IFIRST < ILAST, and that the diagonal of B is non-zero. */
1347 /* Compute the Shift. */
1349 /* At this point, IFIRST < ILAST, and the diagonal elements of */
1350 /* T(IFIRST:ILAST,IFIRST,ILAST) are larger than BTOL (in */
1353 if (iiter / 10 * 10 != iiter) {
1355 /* The Wilkinson shift (AEP p.512), i.e., the eigenvalue of */
1356 /* the bottom-right 2x2 block of A inv(B) which is nearest to */
1357 /* the bottom-right element. */
1359 /* We factor B as U*D, where U has unit diagonals, and */
1360 /* compute (A*inv(D))*inv(U). */
1362 i__2 = ilast - 1 + ilast * t_dim1;
1363 q__2.r = bscale * t[i__2].r, q__2.i = bscale * t[i__2].i;
1364 i__3 = ilast + ilast * t_dim1;
1365 q__3.r = bscale * t[i__3].r, q__3.i = bscale * t[i__3].i;
1366 c_div(&q__1, &q__2, &q__3);
1367 u12.r = q__1.r, u12.i = q__1.i;
1368 i__2 = ilast - 1 + (ilast - 1) * h_dim1;
1369 q__2.r = ascale * h__[i__2].r, q__2.i = ascale * h__[i__2].i;
1370 i__3 = ilast - 1 + (ilast - 1) * t_dim1;
1371 q__3.r = bscale * t[i__3].r, q__3.i = bscale * t[i__3].i;
1372 c_div(&q__1, &q__2, &q__3);
1373 ad11.r = q__1.r, ad11.i = q__1.i;
1374 i__2 = ilast + (ilast - 1) * h_dim1;
1375 q__2.r = ascale * h__[i__2].r, q__2.i = ascale * h__[i__2].i;
1376 i__3 = ilast - 1 + (ilast - 1) * t_dim1;
1377 q__3.r = bscale * t[i__3].r, q__3.i = bscale * t[i__3].i;
1378 c_div(&q__1, &q__2, &q__3);
1379 ad21.r = q__1.r, ad21.i = q__1.i;
1380 i__2 = ilast - 1 + ilast * h_dim1;
1381 q__2.r = ascale * h__[i__2].r, q__2.i = ascale * h__[i__2].i;
1382 i__3 = ilast + ilast * t_dim1;
1383 q__3.r = bscale * t[i__3].r, q__3.i = bscale * t[i__3].i;
1384 c_div(&q__1, &q__2, &q__3);
1385 ad12.r = q__1.r, ad12.i = q__1.i;
1386 i__2 = ilast + ilast * h_dim1;
1387 q__2.r = ascale * h__[i__2].r, q__2.i = ascale * h__[i__2].i;
1388 i__3 = ilast + ilast * t_dim1;
1389 q__3.r = bscale * t[i__3].r, q__3.i = bscale * t[i__3].i;
1390 c_div(&q__1, &q__2, &q__3);
1391 ad22.r = q__1.r, ad22.i = q__1.i;
1392 q__2.r = u12.r * ad21.r - u12.i * ad21.i, q__2.i = u12.r * ad21.i
1394 q__1.r = ad22.r - q__2.r, q__1.i = ad22.i - q__2.i;
1395 abi22.r = q__1.r, abi22.i = q__1.i;
1396 q__2.r = u12.r * ad11.r - u12.i * ad11.i, q__2.i = u12.r * ad11.i
1398 q__1.r = ad12.r - q__2.r, q__1.i = ad12.i - q__2.i;
1399 abi12.r = q__1.r, abi12.i = q__1.i;
1401 shift.r = abi22.r, shift.i = abi22.i;
1402 c_sqrt(&q__2, &abi12);
1403 c_sqrt(&q__3, &ad21);
1404 q__1.r = q__2.r * q__3.r - q__2.i * q__3.i, q__1.i = q__2.r *
1405 q__3.i + q__2.i * q__3.r;
1406 ctemp.r = q__1.r, ctemp.i = q__1.i;
1407 temp = (r__1 = ctemp.r, abs(r__1)) + (r__2 = r_imag(&ctemp), abs(
1409 if (ctemp.r != 0.f || ctemp.i != 0.f) {
1410 q__2.r = ad11.r - shift.r, q__2.i = ad11.i - shift.i;
1411 q__1.r = q__2.r * .5f, q__1.i = q__2.i * .5f;
1412 x.r = q__1.r, x.i = q__1.i;
1413 temp2 = (r__1 = x.r, abs(r__1)) + (r__2 = r_imag(&x), abs(
1416 r__3 = temp, r__4 = (r__1 = x.r, abs(r__1)) + (r__2 = r_imag(&
1418 temp = f2cmax(r__3,r__4);
1419 q__5.r = x.r / temp, q__5.i = x.i / temp;
1420 pow_ci(&q__4, &q__5, &c__2);
1421 q__7.r = ctemp.r / temp, q__7.i = ctemp.i / temp;
1422 pow_ci(&q__6, &q__7, &c__2);
1423 q__3.r = q__4.r + q__6.r, q__3.i = q__4.i + q__6.i;
1424 c_sqrt(&q__2, &q__3);
1425 q__1.r = temp * q__2.r, q__1.i = temp * q__2.i;
1426 y.r = q__1.r, y.i = q__1.i;
1428 q__1.r = x.r / temp2, q__1.i = x.i / temp2;
1429 q__2.r = x.r / temp2, q__2.i = x.i / temp2;
1430 if (q__1.r * y.r + r_imag(&q__2) * r_imag(&y) < 0.f) {
1431 q__3.r = -y.r, q__3.i = -y.i;
1432 y.r = q__3.r, y.i = q__3.i;
1435 q__4.r = x.r + y.r, q__4.i = x.i + y.i;
1436 cladiv_(&q__3, &ctemp, &q__4);
1437 q__2.r = ctemp.r * q__3.r - ctemp.i * q__3.i, q__2.i =
1438 ctemp.r * q__3.i + ctemp.i * q__3.r;
1439 q__1.r = shift.r - q__2.r, q__1.i = shift.i - q__2.i;
1440 shift.r = q__1.r, shift.i = q__1.i;
1444 /* Exceptional shift. Chosen for no particularly good reason. */
1446 i__2 = ilast + ilast * t_dim1;
1447 if (iiter / 20 * 20 == iiter && bscale * ((r__1 = t[i__2].r, abs(
1448 r__1)) + (r__2 = r_imag(&t[ilast + ilast * t_dim1]), abs(
1450 i__2 = ilast + ilast * h_dim1;
1451 q__3.r = ascale * h__[i__2].r, q__3.i = ascale * h__[i__2].i;
1452 i__3 = ilast + ilast * t_dim1;
1453 q__4.r = bscale * t[i__3].r, q__4.i = bscale * t[i__3].i;
1454 c_div(&q__2, &q__3, &q__4);
1455 q__1.r = eshift.r + q__2.r, q__1.i = eshift.i + q__2.i;
1456 eshift.r = q__1.r, eshift.i = q__1.i;
1458 i__2 = ilast + (ilast - 1) * h_dim1;
1459 q__3.r = ascale * h__[i__2].r, q__3.i = ascale * h__[i__2].i;
1460 i__3 = ilast - 1 + (ilast - 1) * t_dim1;
1461 q__4.r = bscale * t[i__3].r, q__4.i = bscale * t[i__3].i;
1462 c_div(&q__2, &q__3, &q__4);
1463 q__1.r = eshift.r + q__2.r, q__1.i = eshift.i + q__2.i;
1464 eshift.r = q__1.r, eshift.i = q__1.i;
1466 shift.r = eshift.r, shift.i = eshift.i;
1469 /* Now check for two consecutive small subdiagonals. */
1472 for (j = ilast - 1; j >= i__2; --j) {
1474 i__3 = j + j * h_dim1;
1475 q__2.r = ascale * h__[i__3].r, q__2.i = ascale * h__[i__3].i;
1476 i__4 = j + j * t_dim1;
1477 q__4.r = bscale * t[i__4].r, q__4.i = bscale * t[i__4].i;
1478 q__3.r = shift.r * q__4.r - shift.i * q__4.i, q__3.i = shift.r *
1479 q__4.i + shift.i * q__4.r;
1480 q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
1481 ctemp.r = q__1.r, ctemp.i = q__1.i;
1482 temp = (r__1 = ctemp.r, abs(r__1)) + (r__2 = r_imag(&ctemp), abs(
1484 i__3 = j + 1 + j * h_dim1;
1485 temp2 = ascale * ((r__1 = h__[i__3].r, abs(r__1)) + (r__2 =
1486 r_imag(&h__[j + 1 + j * h_dim1]), abs(r__2)));
1487 tempr = f2cmax(temp,temp2);
1488 if (tempr < 1.f && tempr != 0.f) {
1492 i__3 = j + (j - 1) * h_dim1;
1493 if (((r__1 = h__[i__3].r, abs(r__1)) + (r__2 = r_imag(&h__[j + (j
1494 - 1) * h_dim1]), abs(r__2))) * temp2 <= temp * atol) {
1501 i__2 = ifirst + ifirst * h_dim1;
1502 q__2.r = ascale * h__[i__2].r, q__2.i = ascale * h__[i__2].i;
1503 i__3 = ifirst + ifirst * t_dim1;
1504 q__4.r = bscale * t[i__3].r, q__4.i = bscale * t[i__3].i;
1505 q__3.r = shift.r * q__4.r - shift.i * q__4.i, q__3.i = shift.r *
1506 q__4.i + shift.i * q__4.r;
1507 q__1.r = q__2.r - q__3.r, q__1.i = q__2.i - q__3.i;
1508 ctemp.r = q__1.r, ctemp.i = q__1.i;
1511 /* Do an implicit-shift QZ sweep. */
1515 i__2 = istart + 1 + istart * h_dim1;
1516 q__1.r = ascale * h__[i__2].r, q__1.i = ascale * h__[i__2].i;
1517 ctemp2.r = q__1.r, ctemp2.i = q__1.i;
1518 clartg_(&ctemp, &ctemp2, &c__, &s, &ctemp3);
1523 for (j = istart; j <= i__2; ++j) {
1525 i__3 = j + (j - 1) * h_dim1;
1526 ctemp.r = h__[i__3].r, ctemp.i = h__[i__3].i;
1527 clartg_(&ctemp, &h__[j + 1 + (j - 1) * h_dim1], &c__, &s, &
1528 h__[j + (j - 1) * h_dim1]);
1529 i__3 = j + 1 + (j - 1) * h_dim1;
1530 h__[i__3].r = 0.f, h__[i__3].i = 0.f;
1534 for (jc = j; jc <= i__3; ++jc) {
1535 i__4 = j + jc * h_dim1;
1536 q__2.r = c__ * h__[i__4].r, q__2.i = c__ * h__[i__4].i;
1537 i__5 = j + 1 + jc * h_dim1;
1538 q__3.r = s.r * h__[i__5].r - s.i * h__[i__5].i, q__3.i = s.r *
1539 h__[i__5].i + s.i * h__[i__5].r;
1540 q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
1541 ctemp.r = q__1.r, ctemp.i = q__1.i;
1542 i__4 = j + 1 + jc * h_dim1;
1544 q__3.r = -q__4.r, q__3.i = -q__4.i;
1545 i__5 = j + jc * h_dim1;
1546 q__2.r = q__3.r * h__[i__5].r - q__3.i * h__[i__5].i, q__2.i =
1547 q__3.r * h__[i__5].i + q__3.i * h__[i__5].r;
1548 i__6 = j + 1 + jc * h_dim1;
1549 q__5.r = c__ * h__[i__6].r, q__5.i = c__ * h__[i__6].i;
1550 q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
1551 h__[i__4].r = q__1.r, h__[i__4].i = q__1.i;
1552 i__4 = j + jc * h_dim1;
1553 h__[i__4].r = ctemp.r, h__[i__4].i = ctemp.i;
1554 i__4 = j + jc * t_dim1;
1555 q__2.r = c__ * t[i__4].r, q__2.i = c__ * t[i__4].i;
1556 i__5 = j + 1 + jc * t_dim1;
1557 q__3.r = s.r * t[i__5].r - s.i * t[i__5].i, q__3.i = s.r * t[
1558 i__5].i + s.i * t[i__5].r;
1559 q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
1560 ctemp2.r = q__1.r, ctemp2.i = q__1.i;
1561 i__4 = j + 1 + jc * t_dim1;
1563 q__3.r = -q__4.r, q__3.i = -q__4.i;
1564 i__5 = j + jc * t_dim1;
1565 q__2.r = q__3.r * t[i__5].r - q__3.i * t[i__5].i, q__2.i =
1566 q__3.r * t[i__5].i + q__3.i * t[i__5].r;
1567 i__6 = j + 1 + jc * t_dim1;
1568 q__5.r = c__ * t[i__6].r, q__5.i = c__ * t[i__6].i;
1569 q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
1570 t[i__4].r = q__1.r, t[i__4].i = q__1.i;
1571 i__4 = j + jc * t_dim1;
1572 t[i__4].r = ctemp2.r, t[i__4].i = ctemp2.i;
1577 for (jr = 1; jr <= i__3; ++jr) {
1578 i__4 = jr + j * q_dim1;
1579 q__2.r = c__ * q[i__4].r, q__2.i = c__ * q[i__4].i;
1581 i__5 = jr + (j + 1) * q_dim1;
1582 q__3.r = q__4.r * q[i__5].r - q__4.i * q[i__5].i, q__3.i =
1583 q__4.r * q[i__5].i + q__4.i * q[i__5].r;
1584 q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
1585 ctemp.r = q__1.r, ctemp.i = q__1.i;
1586 i__4 = jr + (j + 1) * q_dim1;
1587 q__3.r = -s.r, q__3.i = -s.i;
1588 i__5 = jr + j * q_dim1;
1589 q__2.r = q__3.r * q[i__5].r - q__3.i * q[i__5].i, q__2.i =
1590 q__3.r * q[i__5].i + q__3.i * q[i__5].r;
1591 i__6 = jr + (j + 1) * q_dim1;
1592 q__4.r = c__ * q[i__6].r, q__4.i = c__ * q[i__6].i;
1593 q__1.r = q__2.r + q__4.r, q__1.i = q__2.i + q__4.i;
1594 q[i__4].r = q__1.r, q[i__4].i = q__1.i;
1595 i__4 = jr + j * q_dim1;
1596 q[i__4].r = ctemp.r, q[i__4].i = ctemp.i;
1601 i__3 = j + 1 + (j + 1) * t_dim1;
1602 ctemp.r = t[i__3].r, ctemp.i = t[i__3].i;
1603 clartg_(&ctemp, &t[j + 1 + j * t_dim1], &c__, &s, &t[j + 1 + (j +
1605 i__3 = j + 1 + j * t_dim1;
1606 t[i__3].r = 0.f, t[i__3].i = 0.f;
1610 i__3 = f2cmin(i__4,ilast);
1611 for (jr = ifrstm; jr <= i__3; ++jr) {
1612 i__4 = jr + (j + 1) * h_dim1;
1613 q__2.r = c__ * h__[i__4].r, q__2.i = c__ * h__[i__4].i;
1614 i__5 = jr + j * h_dim1;
1615 q__3.r = s.r * h__[i__5].r - s.i * h__[i__5].i, q__3.i = s.r *
1616 h__[i__5].i + s.i * h__[i__5].r;
1617 q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
1618 ctemp.r = q__1.r, ctemp.i = q__1.i;
1619 i__4 = jr + j * h_dim1;
1621 q__3.r = -q__4.r, q__3.i = -q__4.i;
1622 i__5 = jr + (j + 1) * h_dim1;
1623 q__2.r = q__3.r * h__[i__5].r - q__3.i * h__[i__5].i, q__2.i =
1624 q__3.r * h__[i__5].i + q__3.i * h__[i__5].r;
1625 i__6 = jr + j * h_dim1;
1626 q__5.r = c__ * h__[i__6].r, q__5.i = c__ * h__[i__6].i;
1627 q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
1628 h__[i__4].r = q__1.r, h__[i__4].i = q__1.i;
1629 i__4 = jr + (j + 1) * h_dim1;
1630 h__[i__4].r = ctemp.r, h__[i__4].i = ctemp.i;
1634 for (jr = ifrstm; jr <= i__3; ++jr) {
1635 i__4 = jr + (j + 1) * t_dim1;
1636 q__2.r = c__ * t[i__4].r, q__2.i = c__ * t[i__4].i;
1637 i__5 = jr + j * t_dim1;
1638 q__3.r = s.r * t[i__5].r - s.i * t[i__5].i, q__3.i = s.r * t[
1639 i__5].i + s.i * t[i__5].r;
1640 q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
1641 ctemp.r = q__1.r, ctemp.i = q__1.i;
1642 i__4 = jr + j * t_dim1;
1644 q__3.r = -q__4.r, q__3.i = -q__4.i;
1645 i__5 = jr + (j + 1) * t_dim1;
1646 q__2.r = q__3.r * t[i__5].r - q__3.i * t[i__5].i, q__2.i =
1647 q__3.r * t[i__5].i + q__3.i * t[i__5].r;
1648 i__6 = jr + j * t_dim1;
1649 q__5.r = c__ * t[i__6].r, q__5.i = c__ * t[i__6].i;
1650 q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
1651 t[i__4].r = q__1.r, t[i__4].i = q__1.i;
1652 i__4 = jr + (j + 1) * t_dim1;
1653 t[i__4].r = ctemp.r, t[i__4].i = ctemp.i;
1658 for (jr = 1; jr <= i__3; ++jr) {
1659 i__4 = jr + (j + 1) * z_dim1;
1660 q__2.r = c__ * z__[i__4].r, q__2.i = c__ * z__[i__4].i;
1661 i__5 = jr + j * z_dim1;
1662 q__3.r = s.r * z__[i__5].r - s.i * z__[i__5].i, q__3.i =
1663 s.r * z__[i__5].i + s.i * z__[i__5].r;
1664 q__1.r = q__2.r + q__3.r, q__1.i = q__2.i + q__3.i;
1665 ctemp.r = q__1.r, ctemp.i = q__1.i;
1666 i__4 = jr + j * z_dim1;
1668 q__3.r = -q__4.r, q__3.i = -q__4.i;
1669 i__5 = jr + (j + 1) * z_dim1;
1670 q__2.r = q__3.r * z__[i__5].r - q__3.i * z__[i__5].i,
1671 q__2.i = q__3.r * z__[i__5].i + q__3.i * z__[i__5]
1673 i__6 = jr + j * z_dim1;
1674 q__5.r = c__ * z__[i__6].r, q__5.i = c__ * z__[i__6].i;
1675 q__1.r = q__2.r + q__5.r, q__1.i = q__2.i + q__5.i;
1676 z__[i__4].r = q__1.r, z__[i__4].i = q__1.i;
1677 i__4 = jr + (j + 1) * z_dim1;
1678 z__[i__4].r = ctemp.r, z__[i__4].i = ctemp.i;
1691 /* Drop-through = non-convergence */
1697 /* Successful completion of all QZ steps */
1701 /* Set Eigenvalues 1:ILO-1 */
1704 for (j = 1; j <= i__1; ++j) {
1705 absb = c_abs(&t[j + j * t_dim1]);
1706 if (absb > safmin) {
1707 i__2 = j + j * t_dim1;
1708 q__2.r = t[i__2].r / absb, q__2.i = t[i__2].i / absb;
1709 r_cnjg(&q__1, &q__2);
1710 signbc.r = q__1.r, signbc.i = q__1.i;
1711 i__2 = j + j * t_dim1;
1712 t[i__2].r = absb, t[i__2].i = 0.f;
1715 cscal_(&i__2, &signbc, &t[j * t_dim1 + 1], &c__1);
1716 cscal_(&j, &signbc, &h__[j * h_dim1 + 1], &c__1);
1718 cscal_(&c__1, &signbc, &h__[j + j * h_dim1], &c__1);
1721 cscal_(n, &signbc, &z__[j * z_dim1 + 1], &c__1);
1724 i__2 = j + j * t_dim1;
1725 t[i__2].r = 0.f, t[i__2].i = 0.f;
1728 i__3 = j + j * h_dim1;
1729 alpha[i__2].r = h__[i__3].r, alpha[i__2].i = h__[i__3].i;
1731 i__3 = j + j * t_dim1;
1732 beta[i__2].r = t[i__3].r, beta[i__2].i = t[i__3].i;
1736 /* Normal Termination */
1740 /* Exit (other than argument error) -- return optimal workspace size */
1743 q__1.r = (real) (*n), q__1.i = 0.f;
1744 work[1].r = q__1.r, work[1].i = q__1.i;