/* BLAS integer types.  The two pairs below are the alternate branches of a
   stripped #if/#else (64-bit-interface vs. default build); only one pair is
   active in the original header. */
14 typedef long long BLASLONG;
15 typedef unsigned long long BLASULONG;
17 typedef long BLASLONG;
18 typedef unsigned long BLASULONG;
/* blasint: the integer width used throughout the BLAS/LAPACK interface. */
22 typedef BLASLONG blasint;
/* blasabs(x): absolute value matched to the width of blasint; the three
   variants (llabs/labs/abs) come from a stripped #if chain selecting on the
   blasint width. */
24 #define blasabs(x) llabs(x)
26 #define blasabs(x) labs(x)
30 #define blasabs(x) abs(x)
/* f2c scalar types mapped onto C types. */
33 typedef blasint integer;
35 typedef unsigned int uinteger;
36 typedef char *address;
37 typedef short int shortint;
39 typedef double doublereal;
/* Fortran COMPLEX / COMPLEX*16 represented as plain {re,im} structs.
   NOTE(review): 'real' is defined in a stripped section -- presumably
   float; confirm against the full header. */
40 typedef struct { real r, i; } complex;
41 typedef struct { doublereal r, i; } doublecomplex;
/* MSVC branch: convert the f2c complex structs to the _Fcomplex/_Dcomplex
   types used by the MSVC <complex.h> implementation (no native _Complex). */
43 static inline _Fcomplex Cf(complex *z) {_Fcomplex zz={z->r , z->i}; return zz;}
44 static inline _Dcomplex Cd(doublecomplex *z) {_Dcomplex zz={z->r , z->i};return zz;}
/* Pointer reinterpretations; rely on {r,i} having the same layout as
   _Fcomplex/_Dcomplex. */
45 static inline _Fcomplex * _pCf(complex *z) {return (_Fcomplex*)z;}
46 static inline _Dcomplex * _pCd(doublecomplex *z) {return (_Dcomplex*)z;}
/* C99 branch: build native _Complex values from the f2c structs. */
48 static inline _Complex float Cf(complex *z) {return z->r + z->i*_Complex_I;}
49 static inline _Complex double Cd(doublecomplex *z) {return z->r + z->i*_Complex_I;}
50 static inline _Complex float * _pCf(complex *z) {return (_Complex float*)z;}
51 static inline _Complex double * _pCd(doublecomplex *z) {return (_Complex double*)z;}
/* Writable lvalue views of a complex/doublecomplex via the cast pointers;
   used as assignment targets by the macros below. */
53 #define pCf(z) (*_pCf(z))
54 #define pCd(z) (*_pCd(z))
/* f2c logical and narrow integer types. */
56 typedef short int shortlogical;
57 typedef char logical1;
58 typedef char integer1;
63 /* Extern is for use with -E */
/* The f2c I/O control structures (cilist, icilist, olist, alist, inlist)
   were declared here; only their section comments and one member survive
   in this excerpt. */
74 /*external read, write*/
83 /*internal read, write*/
113 /*rewind, backspace, endfile*/
/* Member of the (elided) inlist struct used by INQUIRE. */
125 ftnint *inex; /*parameters in standard's order*/
/* Multitype / Vardesc / Namelist: members elided in this excerpt. */
151 union Multitype { /* for multiple entry points */
162 typedef union Multitype Multitype;
164 struct Vardesc { /* for Namelist */
170 typedef struct Vardesc Vardesc;
177 typedef struct Namelist Namelist;
/* f2c arithmetic helper macros.  NOTE: arguments are evaluated more than
   once -- do not pass expressions with side effects. */
179 #define abs(x) ((x) >= 0 ? (x) : -(x))
180 #define dabs(x) (fabs(x))
181 #define f2cmin(a,b) ((a) <= (b) ? (a) : (b))
182 #define f2cmax(a,b) ((a) >= (b) ? (a) : (b))
183 #define dmin(a,b) (f2cmin(a,b))
184 #define dmax(a,b) (f2cmax(a,b))
/* Fortran BTEST / IBCLR / IBSET equivalents. */
185 #define bit_test(a,b) ((a) >> (b) & 1)
186 #define bit_clear(a,b) ((a) & ~((uinteger)1 << (b)))
187 #define bit_set(a,b) ((a) | ((uinteger)1 << (b)))
/* Fortran ABORT intrinsic; sig_die (defined below) just exits. */
189 #define abort_() { sig_die("Fortran abort routine called", 1); }
/* Single-precision complex absolute value and cosine (Fortran CABS/CCOS).
   NOTE(review): c_cos calls double-precision ccos on a float-complex value
   -- presumably intentional widening; the float result is narrowed on store. */
190 #define c_abs(z) (cabsf(Cf(z)))
191 #define c_cos(R,Z) { pCf(R)=ccos(Cf(Z)); }
/* Complex division for the MSVC (_Fcomplex/_Dcomplex) branch:
 *   c = a / b   with c, a, b pointers to {r, i} structs.
 *
 * The previous definitions were wrong twice over: they divided
 * component-wise (a.r/b.r, a.i/b.i), which is not complex division, and
 * they assigned into the by-value temporary returned by Cf()/Cd(), so the
 * result never reached *c.  These versions use the standard formula
 *   a/b = (a * conj(b)) / |b|^2
 * and write through the output pointer.  Results are computed into
 * temporaries first so the macros stay correct when c aliases a or b.
 * (No overflow/underflow guard, matching plain f2c's simple c_div.) */
#define c_div(c, a, b) { \
	float _den = (b)->r * (b)->r + (b)->i * (b)->i; \
	float _cr = ((a)->r * (b)->r + (a)->i * (b)->i) / _den; \
	float _ci = ((a)->i * (b)->r - (a)->r * (b)->i) / _den; \
	(c)->r = _cr; (c)->i = _ci; }
#define z_div(c, a, b) { \
	double _den = (b)->r * (b)->r + (b)->i * (b)->i; \
	double _cr = ((a)->r * (b)->r + (a)->i * (b)->i) / _den; \
	double _ci = ((a)->i * (b)->r - (a)->r * (b)->i) / _den; \
	(c)->r = _cr; (c)->i = _ci; }
/* C99 branch: native _Complex arithmetic performs true complex division. */
196 #define c_div(c, a, b) {pCf(c) = Cf(a)/Cf(b);}
197 #define z_div(c, a, b) {pCd(c) = Cd(a)/Cd(b);}
/* Single-precision complex intrinsics (Fortran CEXP/CLOG/CSIN/CSQRT). */
199 #define c_exp(R, Z) {pCf(R) = cexpf(Cf(Z));}
200 #define c_log(R, Z) {pCf(R) = clogf(Cf(Z));}
201 #define c_sin(R, Z) {pCf(R) = csinf(Cf(Z));}
202 //#define c_sqrt(R, Z) {*(R) = csqrtf(Cf(Z));}
203 #define c_sqrt(R, Z) {pCf(R) = csqrtf(Cf(Z));}
/* Fortran intrinsics on REAL/DOUBLE PRECISION.  Each macro takes POINTER
   arguments, matching the f2c convention of passing everything by address.
   Arguments may be evaluated more than once. */
204 #define d_abs(x) (fabs(*(x)))
205 #define d_acos(x) (acos(*(x)))
206 #define d_asin(x) (asin(*(x)))
207 #define d_atan(x) (atan(*(x)))
208 #define d_atn2(x, y) (atan2(*(x),*(y)))
/* DCONJG / CONJG: write the conjugate through the output pointer. */
209 #define d_cnjg(R, Z) { pCd(R) = conj(Cd(Z)); }
210 #define r_cnjg(R, Z) { pCf(R) = conjf(Cf(Z)); }
211 #define d_cos(x) (cos(*(x)))
212 #define d_cosh(x) (cosh(*(x)))
213 #define d_dim(__a, __b) ( *(__a) > *(__b) ? *(__a) - *(__b) : 0.0 )
214 #define d_exp(x) (exp(*(x)))
215 #define d_imag(z) (cimag(Cd(z)))
216 #define r_imag(z) (cimagf(Cf(z)))
/* AINT: truncate toward zero.  NOTE: the r_ variants reuse the
   double-precision floor/log, as f2c itself does. */
217 #define d_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
218 #define r_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
/* LOG10 via log(x) * log10(e). */
219 #define d_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
220 #define r_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
221 #define d_log(x) (log(*(x)))
222 #define d_mod(x, y) (fmod(*(x), *(y)))
/* NINT: round half away from zero. */
223 #define u_nint(__x) ((__x)>=0 ? floor((__x) + .5) : -floor(.5 - (__x)))
224 #define d_nint(x) u_nint(*(x))
/* SIGN(a,b): |a| with the sign of b. */
225 #define u_sign(__a,__b) ((__b) >= 0 ? ((__a) >= 0 ? (__a) : -(__a)) : -((__a) >= 0 ? (__a) : -(__a)))
226 #define d_sign(a,b) u_sign(*(a),*(b))
227 #define r_sign(a,b) u_sign(*(a),*(b))
228 #define d_sin(x) (sin(*(x)))
229 #define d_sinh(x) (sinh(*(x)))
230 #define d_sqrt(x) (sqrt(*(x)))
231 #define d_tan(x) (tan(*(x)))
232 #define d_tanh(x) (tanh(*(x)))
233 #define i_abs(x) abs(*(x))
234 #define i_dnnt(x) ((integer)u_nint(*(x)))
235 #define i_len(s, n) (n)
236 #define i_nint(x) ((integer)u_nint(*(x)))
237 #define i_sign(a,b) ((integer)u_sign((integer)*(a),(integer)*(b)))
/* Fortran ** operator; the [sdcz]pow_ui helpers are defined later. */
238 #define pow_dd(ap, bp) ( pow(*(ap), *(bp)))
239 #define pow_si(B,E) spow_ui(*(B),*(E))
240 #define pow_ri(B,E) spow_ui(*(B),*(E))
241 #define pow_di(B,E) dpow_ui(*(B),*(E))
242 #define pow_zi(p, a, b) {pCd(p) = zpow_ui(Cd(a), *(b));}
243 #define pow_ci(p, a, b) {pCf(p) = cpow_ui(Cf(a), *(b));}
/* NOTE(review): pow_zz passes *(B), a struct, where cpow expects
   _Complex double -- looks like it should be Cd(B); confirm upstream. */
244 #define pow_zz(R,A,B) {pCd(R) = cpow(Cd(A),*(B));}
/* Fortran character runtime. */
/* s_cat: concatenate *np source strings rpp[] (lengths rnp[]) into lpp,
   blank-padding the destination to total length llp. */
245 #define s_cat(lpp, rpp, rnp, np, llp) { ftnlen i, nc, ll; char *f__rp, *lp; ll = (llp); lp = (lpp); for(i=0; i < (int)*(np); ++i) { nc = ll; if((rnp)[i] < nc) nc = (rnp)[i]; ll -= nc; f__rp = (rpp)[i]; while(--nc >= 0) *lp++ = *(f__rp)++; } while(--ll >= 0) *lp++ = ' '; }
/* s_cmp: bounded compare.  NOTE(review): plain strncmp over the shorter
   bound -- unlike true f2c s_cmp it does not blank-pad the shorter string. */
246 #define s_cmp(a,b,c,d) ((integer)strncmp((a),(b),f2cmin((c),(d))))
/* s_copy: copy at most f2cmin(C,D) chars, stopping at NUL.  NOTE(review):
   unlike true f2c s_copy it does not blank-pad the destination. */
247 #define s_copy(A,B,C,D) { int __i,__m; for (__i=0, __m=f2cmin((C),(D)); __i<__m && (B)[__i] != 0; ++__i) (A)[__i] = (B)[__i]; }
/* sig_die: ignores its message and kill-flag arguments and exits(1). */
248 #define sig_die(s, kill) { exit(1); }
/* s_stop: Fortran STOP; the message is ignored. */
249 #define s_stop(s, n) {exit(0);}
/* SCCS-style version stamp carried over from libf77; never read at runtime. */
250 static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n";
/* Double-complex intrinsics (Fortran CDABS/CDEXP/CDSQRT). */
251 #define z_abs(z) (cabs(Cd(z)))
252 #define z_exp(R, Z) {pCd(R) = cexp(Cd(Z));}
253 #define z_sqrt(R, Z) {pCd(R) = csqrt(Cd(Z));}
/* Loop-control shims for translated EXIT / CYCLE statements. */
254 #define myexit_() break;
255 #define mycycle() continue;
/* NOTE(review): these expand to a braced block, not an expression -- they
   only work where the translated code expects exactly that form. */
256 #define myceiling(w) {ceil(w)}
257 #define myhuge(w) {HUGE_VAL}
258 //#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);}
/* MAXLOC shim; always dispatches to the double version (see dmaxloc_ below). */
259 #define mymaxloc(w,s,e,n) {dmaxloc_(w,*(s),*(e),n)}
261 /* procedure parameter types for -A and -C++ */
263 #define F2C_proc_par_types 1
/* Function-pointer type for LOGICAL procedures; the two typedefs are the
   C++ (variadic prototype) and K&R-C branches of a stripped #ifdef. */
265 typedef logical (*L_fp)(...);
267 typedef logical (*L_fp)();
/* spow_ui/dpow_ui: x**n for integer n via binary (square-and-multiply)
   exponentiation.  The squaring loops and returns are elided in this
   excerpt. */
270 static float spow_ui(float x, integer n) {
271 float pow=1.0; unsigned long int u;
/* Negative exponent: x**n == (1/x)**(-n). */
273 if(n < 0) n = -n, x = 1/x;
282 static double dpow_ui(double x, integer n) {
283 double pow=1.0; unsigned long int u;
285 if(n < 0) n = -n, x = 1/x;
/* cpow_ui: complex x**n by binary exponentiation (bodies partly elided).
   NOTE(review): in this MSVC branch the negative-exponent inversion
   (1/x.r, 1/x.i) and the multiply steps below operate component-wise,
   which is not complex arithmetic -- same defect class as the MSVC c_div;
   confirm against upstream before relying on it. */
295 static _Fcomplex cpow_ui(complex x, integer n) {
296 complex pow={1.0,0.0}; unsigned long int u;
298 if(n < 0) n = -n, x.r = 1/x.r, x.i=1/x.i;
300 if(u & 01) pow.r *= x.r, pow.i *= x.i;
301 if(u >>= 1) x.r *= x.r, x.i *= x.i;
305 _Fcomplex p={pow.r, pow.i};
/* C99 branch: native _Complex arithmetic, genuinely correct x**n. */
309 static _Complex float cpow_ui(_Complex float x, integer n) {
310 _Complex float pow=1.0; unsigned long int u;
312 if(n < 0) n = -n, x = 1/x;
/* zpow_ui: double-complex x**n by binary exponentiation (bodies partly
   elided).  NOTE(review): as with cpow_ui, the MSVC branch's inversion and
   multiplies act component-wise on _Val[0]/_Val[1], which is not complex
   multiplication -- confirm against upstream. */
323 static _Dcomplex zpow_ui(_Dcomplex x, integer n) {
324 _Dcomplex pow={1.0,0.0}; unsigned long int u;
326 if(n < 0) n = -n, x._Val[0] = 1/x._Val[0], x._Val[1] =1/x._Val[1];
328 if(u & 01) pow._Val[0] *= x._Val[0], pow._Val[1] *= x._Val[1];
329 if(u >>= 1) x._Val[0] *= x._Val[0], x._Val[1] *= x._Val[1];
333 _Dcomplex p = {pow._Val[0], pow._Val[1]};
/* C99 branch: native _Complex double arithmetic. */
337 static _Complex double zpow_ui(_Complex double x, integer n) {
338 _Complex double pow=1.0; unsigned long int u;
340 if(n < 0) n = -n, x = 1/x;
/* pow_ii: integer x**n with f2c's special cases (loop body elided).
   For n <= 0: result is 1 when n == 0 or x == 1; for other x != -1 the
   "1/x" at x == 0 deliberately raises divide-by-zero for 0**negative,
   otherwise the result is 0; x == -1 falls through to the general loop. */
350 static integer pow_ii(integer x, integer n) {
351 integer pow; unsigned long int u;
353 if (n == 0 || x == 1) pow = 1;
354 else if (x != -1) pow = x == 0 ? 1/x : 0;
357 if ((n > 0) || !(n == 0 || x == 1 || x != -1)) {
/* dmaxloc_/smaxloc_: 1-based index (Fortran MAXLOC) of the first maximum
   element of w(s:e).  Parameter n appears unused in the visible lines;
   the returns are elided in this excerpt. */
367 static integer dmaxloc_(double *w, integer s, integer e, integer *n)
369 double m; integer i, mi;
370 for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
/* Strict '>' keeps the FIRST index among equal maxima. */
371 if (w[i-1]>m) mi=i ,m=w[i-1];
374 static integer smaxloc_(float *w, integer s, integer e, integer *n)
376 float m; integer i, mi;
377 for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
378 if (w[i-1]>m) mi=i ,m=w[i-1];
/* cdotc_: conjugated single-complex dot product z = sum(conj(x)*y) with
   strides incx/incy (BLAS CDOTC).  The two bodies are the MSVC and C99
   branches of a stripped #ifdef; the branch joins and the final store into
   *z are elided in this excerpt.
   NOTE(review): the MSVC branch accumulates real and imaginary parts
   independently (re*re, im*im), which is not complex multiplication --
   confirm against upstream. */
381 static inline void cdotc_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
382 integer n = *n_, incx = *incx_, incy = *incy_, i;
384 _Fcomplex zdotc = {0.0, 0.0};
385 if (incx == 1 && incy == 1) {
386 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
387 zdotc._Val[0] += conjf(Cf(&x[i]))._Val[0] * Cf(&y[i])._Val[0];
388 zdotc._Val[1] += conjf(Cf(&x[i]))._Val[1] * Cf(&y[i])._Val[1];
391 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
392 zdotc._Val[0] += conjf(Cf(&x[i*incx]))._Val[0] * Cf(&y[i*incy])._Val[0];
393 zdotc._Val[1] += conjf(Cf(&x[i*incx]))._Val[1] * Cf(&y[i*incy])._Val[1];
/* C99 branch: true complex accumulation. */
399 _Complex float zdotc = 0.0;
400 if (incx == 1 && incy == 1) {
401 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
402 zdotc += conjf(Cf(&x[i])) * Cf(&y[i]);
405 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
406 zdotc += conjf(Cf(&x[i*incx])) * Cf(&y[i*incy]);
/* zdotc_: conjugated double-complex dot product z = sum(conj(x)*y) with
   strides incx/incy (BLAS ZDOTC).  MSVC and C99 branches of a stripped
   #ifdef; branch joins and the final store into *z elided in this excerpt.
   NOTE(review): the MSVC branch accumulates component-wise (re*re, im*im),
   which is not complex multiplication -- confirm against upstream. */
412 static inline void zdotc_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
413 integer n = *n_, incx = *incx_, incy = *incy_, i;
415 _Dcomplex zdotc = {0.0, 0.0};
416 if (incx == 1 && incy == 1) {
417 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
418 zdotc._Val[0] += conj(Cd(&x[i]))._Val[0] * Cd(&y[i])._Val[0];
419 zdotc._Val[1] += conj(Cd(&x[i]))._Val[1] * Cd(&y[i])._Val[1];
422 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
423 zdotc._Val[0] += conj(Cd(&x[i*incx]))._Val[0] * Cd(&y[i*incy])._Val[0];
424 zdotc._Val[1] += conj(Cd(&x[i*incx]))._Val[1] * Cd(&y[i*incy])._Val[1];
/* C99 branch: true complex accumulation. */
430 _Complex double zdotc = 0.0;
431 if (incx == 1 && incy == 1) {
432 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
433 zdotc += conj(Cd(&x[i])) * Cd(&y[i]);
436 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
437 zdotc += conj(Cd(&x[i*incx])) * Cd(&y[i*incy]);
/* cdotu_: UNconjugated single-complex dot product z = sum(x*y) with
   strides incx/incy (BLAS CDOTU).  MSVC and C99 branches of a stripped
   #ifdef; branch joins and the final store into *z elided in this excerpt.
   (The loop comments previously said dconjg -- copied from cdotc_; DOTU
   does not conjugate.)
   NOTE(review): the MSVC branch accumulates component-wise (re*re, im*im),
   which is not complex multiplication -- confirm against upstream. */
443 static inline void cdotu_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
444 integer n = *n_, incx = *incx_, incy = *incy_, i;
446 _Fcomplex zdotc = {0.0, 0.0};
447 if (incx == 1 && incy == 1) {
448 for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)*y(i) */
449 zdotc._Val[0] += Cf(&x[i])._Val[0] * Cf(&y[i])._Val[0];
450 zdotc._Val[1] += Cf(&x[i])._Val[1] * Cf(&y[i])._Val[1];
453 for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)*y(i) */
454 zdotc._Val[0] += Cf(&x[i*incx])._Val[0] * Cf(&y[i*incy])._Val[0];
455 zdotc._Val[1] += Cf(&x[i*incx])._Val[1] * Cf(&y[i*incy])._Val[1];
/* C99 branch: true complex accumulation. */
461 _Complex float zdotc = 0.0;
462 if (incx == 1 && incy == 1) {
463 for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)*y(i) */
464 zdotc += Cf(&x[i]) * Cf(&y[i]);
467 for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)*y(i) */
468 zdotc += Cf(&x[i*incx]) * Cf(&y[i*incy]);
/* zdotu_: UNconjugated double-complex dot product z = sum(x*y) with
   strides incx/incy (BLAS ZDOTU).  MSVC and C99 branches of a stripped
   #ifdef; branch joins and the final store into *z elided in this excerpt.
   (The loop comments previously said dconjg -- copied from zdotc_; DOTU
   does not conjugate.)
   NOTE(review): the MSVC branch accumulates component-wise (re*re, im*im),
   which is not complex multiplication -- confirm against upstream. */
474 static inline void zdotu_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
475 integer n = *n_, incx = *incx_, incy = *incy_, i;
477 _Dcomplex zdotc = {0.0, 0.0};
478 if (incx == 1 && incy == 1) {
479 for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)*y(i) */
480 zdotc._Val[0] += Cd(&x[i])._Val[0] * Cd(&y[i])._Val[0];
481 zdotc._Val[1] += Cd(&x[i])._Val[1] * Cd(&y[i])._Val[1];
484 for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)*y(i) */
485 zdotc._Val[0] += Cd(&x[i*incx])._Val[0] * Cd(&y[i*incy])._Val[0];
486 zdotc._Val[1] += Cd(&x[i*incx])._Val[1] * Cd(&y[i*incy])._Val[1];
/* C99 branch: true complex accumulation. */
492 _Complex double zdotc = 0.0;
493 if (incx == 1 && incy == 1) {
494 for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)*y(i) */
495 zdotc += Cd(&x[i]) * Cd(&y[i]);
498 for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)*y(i) */
499 zdotc += Cd(&x[i*incx]) * Cd(&y[i*incy]);
505 /* -- translated by f2c (version 20000121).
506 You must link the resulting object file with the libraries:
507 -lf2c -lm (in that order)
514 /* Table of constant values */
/* f2c passes every argument by address, so scalar constants used in calls
   from ZGEJSV are materialized as file-scope statics. */
516 static doublecomplex c_b1 = {0.,0.};
517 static doublecomplex c_b2 = {1.,0.};
518 static integer c_n1 = -1;
519 static integer c__1 = 1;
520 static integer c__0 = 0;
521 static doublereal c_b141 = 1.;
522 static logical c_false = FALSE_;
524 /* > \brief \b ZGEJSV */
526 /* =========== DOCUMENTATION =========== */
528 /* Online html documentation available at */
529 /* http://www.netlib.org/lapack/explore-html/ */
532 /* > Download ZGEJSV + dependencies */
533 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/zgejsv.
536 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/zgejsv.
539 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/zgejsv.
547 /* SUBROUTINE ZGEJSV( JOBA, JOBU, JOBV, JOBR, JOBT, JOBP, */
548 /* M, N, A, LDA, SVA, U, LDU, V, LDV, */
549 /* CWORK, LWORK, RWORK, LRWORK, IWORK, INFO ) */
552 /* INTEGER INFO, LDA, LDU, LDV, LWORK, M, N */
553 /* COMPLEX*16 A( LDA, * ), U( LDU, * ), V( LDV, * ), CWORK( LWORK ) */
554 /* DOUBLE PRECISION SVA( N ), RWORK( LRWORK ) */
555 /* INTEGER IWORK( * ) */
556 /* CHARACTER*1 JOBA, JOBP, JOBR, JOBT, JOBU, JOBV */
559 /* > \par Purpose: */
564 /* > ZGEJSV computes the singular value decomposition (SVD) of a complex M-by-N */
565 /* > matrix [A], where M >= N. The SVD of [A] is written as */
567 /* > [A] = [U] * [SIGMA] * [V]^*, */
569 /* > where [SIGMA] is an N-by-N (M-by-N) matrix which is zero except for its N */
570 /* > diagonal elements, [U] is an M-by-N (or M-by-M) unitary matrix, and */
571 /* > [V] is an N-by-N unitary matrix. The diagonal elements of [SIGMA] are */
572 /* > the singular values of [A]. The columns of [U] and [V] are the left and */
573 /* > the right singular vectors of [A], respectively. The matrices [U] and [V] */
574 /* > are computed and stored in the arrays U and V, respectively. The diagonal */
575 /* > of [SIGMA] is computed and stored in the array SVA. */
581 /* > \param[in] JOBA */
583 /* > JOBA is CHARACTER*1 */
584 /* > Specifies the level of accuracy: */
585 /* > = 'C': This option works well (high relative accuracy) if A = B * D, */
586 /* > with well-conditioned B and arbitrary diagonal matrix D. */
587 /* > The accuracy cannot be spoiled by COLUMN scaling. The */
588 /* > accuracy of the computed output depends on the condition of */
589 /* > B, and the procedure aims at the best theoretical accuracy. */
590 /* > The relative error max_{i=1:N}|d sigma_i| / sigma_i is */
591 /* > bounded by f(M,N)*epsilon* cond(B), independent of D. */
592 /* > The input matrix is preprocessed with the QRF with column */
593 /* > pivoting. This initial preprocessing and preconditioning by */
594 /* > a rank revealing QR factorization is common for all values of */
595 /* > JOBA. Additional actions are specified as follows: */
596 /* > = 'E': Computation as with 'C' with an additional estimate of the */
597 /* > condition number of B. It provides a realistic error bound. */
598 /* > = 'F': If A = D1 * C * D2 with ill-conditioned diagonal scalings */
599 /* > D1, D2, and well-conditioned matrix C, this option gives */
600 /* > higher accuracy than the 'C' option. If the structure of the */
601 /* > input matrix is not known, and relative accuracy is */
602 /* > desirable, then this option is advisable. The input matrix A */
603 /* > is preprocessed with QR factorization with FULL (row and */
604 /* > column) pivoting. */
605 /* > = 'G': Computation as with 'F' with an additional estimate of the */
606 /* > condition number of B, where A=B*D. If A has heavily weighted */
607 /* > rows, then using this condition number gives too pessimistic */
609 /* > = 'A': Small singular values are not well determined by the data */
610 /* > and are considered as noisy; the matrix is treated as */
611 /* > numerically rank deficient. The error in the computed */
612 /* > singular values is bounded by f(m,n)*epsilon*||A||. */
613 /* > The computed SVD A = U * S * V^* restores A up to */
614 /* > f(m,n)*epsilon*||A||. */
615 /* > This gives the procedure the licence to discard (set to zero) */
616 /* > all singular values below N*epsilon*||A||. */
617 /* > = 'R': Similar as in 'A'. Rank revealing property of the initial */
618 /* > QR factorization is used to reveal (using triangular factor) */
619 /* > a gap sigma_{r+1} < epsilon * sigma_r in which case the */
620 /* > numerical RANK is declared to be r. The SVD is computed with */
621 /* > absolute error bounds, but more accurately than with 'A'. */
624 /* > \param[in] JOBU */
626 /* > JOBU is CHARACTER*1 */
627 /* > Specifies whether to compute the columns of U: */
628 /* > = 'U': N columns of U are returned in the array U. */
629 /* > = 'F': full set of M left sing. vectors is returned in the array U. */
630 /* > = 'W': U may be used as workspace of length M*N. See the description */
632 /* > = 'N': U is not computed. */
635 /* > \param[in] JOBV */
637 /* > JOBV is CHARACTER*1 */
638 /* > Specifies whether to compute the matrix V: */
639 /* > = 'V': N columns of V are returned in the array V; Jacobi rotations */
640 /* > are not explicitly accumulated. */
641 /* > = 'J': N columns of V are returned in the array V, but they are */
642 /* > computed as the product of Jacobi rotations, if JOBT = 'N'. */
643 /* > = 'W': V may be used as workspace of length N*N. See the description */
645 /* > = 'N': V is not computed. */
648 /* > \param[in] JOBR */
650 /* > JOBR is CHARACTER*1 */
651 /* > Specifies the RANGE for the singular values. Issues the licence to */
652 /* > set to zero small positive singular values if they are outside */
653 /* > specified range. If A .NE. 0 is scaled so that the largest singular */
654 /* > value of c*A is around SQRT(BIG), BIG=DLAMCH('O'), then JOBR issues */
655 /* > the licence to kill columns of A whose norm in c*A is less than */
656 /* > SQRT(SFMIN) (for JOBR = 'R'), or less than SMALL=SFMIN/EPSLN, */
657 /* > where SFMIN=DLAMCH('S'), EPSLN=DLAMCH('E'). */
658 /* > = 'N': Do not kill small columns of c*A. This option assumes that */
659 /* > BLAS and QR factorizations and triangular solvers are */
660 /* > implemented to work in that range. If the condition of A */
661 /* > is greater than BIG, use ZGESVJ. */
662 /* > = 'R': RESTRICTED range for sigma(c*A) is [SQRT(SFMIN), SQRT(BIG)] */
663 /* > (roughly, as described above). This option is recommended. */
664 /* > =========================== */
665 /* > For computing the singular values in the FULL range [SFMIN,BIG] */
669 /* > \param[in] JOBT */
671 /* > JOBT is CHARACTER*1 */
672 /* > If the matrix is square then the procedure may determine to use */
673 /* > transposed A if A^* seems to be better with respect to convergence. */
674 /* > If the matrix is not square, JOBT is ignored. */
675 /* > The decision is based on two values of entropy over the adjoint */
676 /* > orbit of A^* * A. See the descriptions of WORK(6) and WORK(7). */
677 /* > = 'T': transpose if entropy test indicates possibly faster */
678 /* > convergence of Jacobi process if A^* is taken as input. If A is */
679 /* > replaced with A^*, then the row pivoting is included automatically. */
680 /* > = 'N': do not speculate. */
681 /* > The option 'T' can be used to compute only the singular values, or */
682 /* > the full SVD (U, SIGMA and V). For only one set of singular vectors */
683 /* > (U or V), the caller should provide both U and V, as one of the */
684 /* > matrices is used as workspace if the matrix A is transposed. */
685 /* > The implementer can easily remove this constraint and make the */
686 /* > code more complicated. See the descriptions of U and V. */
687 /* > In general, this option is considered experimental, and 'N'; should */
688 /* > be preferred. This is subject to changes in the future. */
691 /* > \param[in] JOBP */
693 /* > JOBP is CHARACTER*1 */
694 /* > Issues the licence to introduce structured perturbations to drown */
695 /* > denormalized numbers. This licence should be active if the */
696 /* > denormals are poorly implemented, causing slow computation, */
697 /* > especially in cases of fast convergence (!). For details see [1,2]. */
698 /* > For the sake of simplicity, these perturbations are included only */
699 /* > when the full SVD or only the singular values are requested. The */
700 /* > implementer/user can easily add the perturbation for the cases of */
701 /* > computing one set of singular vectors. */
702 /* > = 'P': introduce perturbation */
703 /* > = 'N': do not perturb */
709 /* > The number of rows of the input matrix A. M >= 0. */
715 /* > The number of columns of the input matrix A. M >= N >= 0. */
718 /* > \param[in,out] A */
720 /* > A is COMPLEX*16 array, dimension (LDA,N) */
721 /* > On entry, the M-by-N matrix A. */
724 /* > \param[in] LDA */
726 /* > LDA is INTEGER */
727 /* > The leading dimension of the array A. LDA >= f2cmax(1,M). */
730 /* > \param[out] SVA */
732 /* > SVA is DOUBLE PRECISION array, dimension (N) */
734 /* > - For WORK(1)/WORK(2) = ONE: The singular values of A. During the */
735 /* > computation SVA contains Euclidean column norms of the */
736 /* > iterated matrices in the array A. */
737 /* > - For WORK(1) .NE. WORK(2): The singular values of A are */
738 /* > (WORK(1)/WORK(2)) * SVA(1:N). This factored form is used if */
739 /* > sigma_max(A) overflows or if small singular values have been */
740 /* > saved from underflow by scaling the input matrix A. */
741 /* > - If JOBR='R' then some of the singular values may be returned */
742 /* > as exact zeros obtained by "set to zero" because they are */
743 /* > below the numerical rank threshold or are denormalized numbers. */
746 /* > \param[out] U */
748 /* > U is COMPLEX*16 array, dimension ( LDU, N ) */
749 /* > If JOBU = 'U', then U contains on exit the M-by-N matrix of */
750 /* > the left singular vectors. */
751 /* > If JOBU = 'F', then U contains on exit the M-by-M matrix of */
752 /* > the left singular vectors, including an ONB */
753 /* > of the orthogonal complement of the Range(A). */
754 /* > If JOBU = 'W' .AND. (JOBV = 'V' .AND. JOBT = 'T' .AND. M = N), */
755 /* > then U is used as workspace if the procedure */
756 /* > replaces A with A^*. In that case, [V] is computed */
757 /* > in U as left singular vectors of A^* and then */
758 /* > copied back to the V array. This 'W' option is just */
759 /* > a reminder to the caller that in this case U is */
760 /* > reserved as workspace of length N*N. */
761 /* > If JOBU = 'N' U is not referenced, unless JOBT='T'. */
764 /* > \param[in] LDU */
766 /* > LDU is INTEGER */
767 /* > The leading dimension of the array U, LDU >= 1. */
768 /* > IF JOBU = 'U' or 'F' or 'W', then LDU >= M. */
771 /* > \param[out] V */
773 /* > V is COMPLEX*16 array, dimension ( LDV, N ) */
774 /* > If JOBV = 'V', 'J' then V contains on exit the N-by-N matrix of */
775 /* > the right singular vectors; */
776 /* > If JOBV = 'W', AND (JOBU = 'U' AND JOBT = 'T' AND M = N), */
777 /* > then V is used as workspace if the procedure */
778 /* > replaces A with A^*. In that case, [U] is computed */
779 /* > in V as right singular vectors of A^* and then */
780 /* > copied back to the U array. This 'W' option is just */
781 /* > a reminder to the caller that in this case V is */
782 /* > reserved as workspace of length N*N. */
783 /* > If JOBV = 'N' V is not referenced, unless JOBT='T'. */
786 /* > \param[in] LDV */
788 /* > LDV is INTEGER */
789 /* > The leading dimension of the array V, LDV >= 1. */
790 /* > If JOBV = 'V' or 'J' or 'W', then LDV >= N. */
793 /* > \param[out] CWORK */
795 /* > CWORK is COMPLEX*16 array, dimension (MAX(2,LWORK)) */
796 /* > If the call to ZGEJSV is a workspace query (indicated by LWORK=-1 or */
797 /* > LRWORK=-1), then on exit CWORK(1) contains the required length of */
798 /* > CWORK for the job parameters used in the call. */
801 /* > \param[in] LWORK */
803 /* > LWORK is INTEGER */
804 /* > Length of CWORK to confirm proper allocation of workspace. */
805 /* > LWORK depends on the job: */
807 /* > 1. If only SIGMA is needed ( JOBU = 'N', JOBV = 'N' ) and */
808 /* > 1.1 .. no scaled condition estimate required (JOBA.NE.'E'.AND.JOBA.NE.'G'): */
809 /* > LWORK >= 2*N+1. This is the minimal requirement. */
810 /* > ->> For optimal performance (blocked code) the optimal value */
811 /* > is LWORK >= N + (N+1)*NB. Here NB is the optimal */
812 /* > block size for ZGEQP3 and ZGEQRF. */
813 /* > In general, optimal LWORK is computed as */
814 /* > LWORK >= f2cmax(N+LWORK(ZGEQP3),N+LWORK(ZGEQRF), LWORK(ZGESVJ)). */
815 /* > 1.2. .. an estimate of the scaled condition number of A is */
816 /* > required (JOBA='E', or 'G'). In this case, LWORK the minimal */
817 /* > requirement is LWORK >= N*N + 2*N. */
818 /* > ->> For optimal performance (blocked code) the optimal value */
819 /* > is LWORK >= f2cmax(N+(N+1)*NB, N*N+2*N)=N**2+2*N. */
820 /* > In general, the optimal length LWORK is computed as */
821 /* > LWORK >= f2cmax(N+LWORK(ZGEQP3),N+LWORK(ZGEQRF), LWORK(ZGESVJ), */
822 /* > N*N+LWORK(ZPOCON)). */
823 /* > 2. If SIGMA and the right singular vectors are needed (JOBV = 'V'), */
825 /* > 2.1 .. no scaled condition estimate requested (JOBE = 'N'): */
826 /* > -> the minimal requirement is LWORK >= 3*N. */
827 /* > -> For optimal performance, */
828 /* > LWORK >= f2cmax(N+(N+1)*NB, 2*N+N*NB)=2*N+N*NB, */
829 /* > where NB is the optimal block size for ZGEQP3, ZGEQRF, ZGELQ, */
830 /* > ZUNMLQ. In general, the optimal length LWORK is computed as */
831 /* > LWORK >= f2cmax(N+LWORK(ZGEQP3), N+LWORK(ZGESVJ), */
832 /* > N+LWORK(ZGELQF), 2*N+LWORK(ZGEQRF), N+LWORK(ZUNMLQ)). */
833 /* > 2.2 .. an estimate of the scaled condition number of A is */
834 /* > required (JOBA='E', or 'G'). */
835 /* > -> the minimal requirement is LWORK >= 3*N. */
836 /* > -> For optimal performance, */
837 /* > LWORK >= f2cmax(N+(N+1)*NB, 2*N,2*N+N*NB)=2*N+N*NB, */
838 /* > where NB is the optimal block size for ZGEQP3, ZGEQRF, ZGELQ, */
839 /* > ZUNMLQ. In general, the optimal length LWORK is computed as */
840 /* > LWORK >= f2cmax(N+LWORK(ZGEQP3), LWORK(ZPOCON), N+LWORK(ZGESVJ), */
841 /* > N+LWORK(ZGELQF), 2*N+LWORK(ZGEQRF), N+LWORK(ZUNMLQ)). */
842 /* > 3. If SIGMA and the left singular vectors are needed */
843 /* > 3.1 .. no scaled condition estimate requested (JOBE = 'N'): */
844 /* > -> the minimal requirement is LWORK >= 3*N. */
845 /* > -> For optimal performance: */
846 /* > if JOBU = 'U' :: LWORK >= f2cmax(3*N, N+(N+1)*NB, 2*N+N*NB)=2*N+N*NB, */
847 /* > where NB is the optimal block size for ZGEQP3, ZGEQRF, ZUNMQR. */
848 /* > In general, the optimal length LWORK is computed as */
849 /* > LWORK >= f2cmax(N+LWORK(ZGEQP3), 2*N+LWORK(ZGEQRF), N+LWORK(ZUNMQR)). */
850 /* > 3.2 .. an estimate of the scaled condition number of A is */
851 /* > required (JOBA='E', or 'G'). */
852 /* > -> the minimal requirement is LWORK >= 3*N. */
853 /* > -> For optimal performance: */
854 /* > if JOBU = 'U' :: LWORK >= f2cmax(3*N, N+(N+1)*NB, 2*N+N*NB)=2*N+N*NB, */
855 /* > where NB is the optimal block size for ZGEQP3, ZGEQRF, ZUNMQR. */
856 /* > In general, the optimal length LWORK is computed as */
857 /* > LWORK >= f2cmax(N+LWORK(ZGEQP3),N+LWORK(ZPOCON), */
858 /* > 2*N+LWORK(ZGEQRF), N+LWORK(ZUNMQR)). */
859 /* > 4. If the full SVD is needed: (JOBU = 'U' or JOBU = 'F') and */
860 /* > 4.1. if JOBV = 'V' */
861 /* > the minimal requirement is LWORK >= 5*N+2*N*N. */
862 /* > 4.2. if JOBV = 'J' the minimal requirement is */
863 /* > LWORK >= 4*N+N*N. */
864 /* > In both cases, the allocated CWORK can accommodate blocked runs */
865 /* > of ZGEQP3, ZGEQRF, ZGELQF, ZUNMQR, ZUNMLQ. */
867 /* > If the call to ZGEJSV is a workspace query (indicated by LWORK=-1 or */
868 /* > LRWORK=-1), then on exit CWORK(1) contains the optimal and CWORK(2) contains the */
869 /* > minimal length of CWORK for the job parameters used in the call. */
872 /* > \param[out] RWORK */
874 /* > RWORK is DOUBLE PRECISION array, dimension (MAX(7,LWORK)) */
876 /* > RWORK(1) = Determines the scaling factor SCALE = RWORK(2) / RWORK(1) */
877 /* > such that SCALE*SVA(1:N) are the computed singular values */
878 /* > of A. (See the description of SVA().) */
879 /* > RWORK(2) = See the description of RWORK(1). */
880 /* > RWORK(3) = SCONDA is an estimate for the condition number of */
881 /* > column equilibrated A. (If JOBA = 'E' or 'G') */
882 /* > SCONDA is an estimate of SQRT(||(R^* * R)^(-1)||_1). */
883 /* > It is computed using SPOCON. It holds */
884 /* > N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA */
885 /* > where R is the triangular factor from the QRF of A. */
886 /* > However, if R is truncated and the numerical rank is */
887 /* > determined to be strictly smaller than N, SCONDA is */
888 /* > returned as -1, thus indicating that the smallest */
889 /* > singular values might be lost. */
891 /* > If full SVD is needed, the following two condition numbers are */
892 /* > useful for the analysis of the algorithm. They are provided for */
893 /* > a developer/implementer who is familiar with the details of */
896 /* > RWORK(4) = an estimate of the scaled condition number of the */
897 /* > triangular factor in the first QR factorization. */
898 /* > RWORK(5) = an estimate of the scaled condition number of the */
899 /* > triangular factor in the second QR factorization. */
900 /* > The following two parameters are computed if JOBT = 'T'. */
901 /* > They are provided for a developer/implementer who is familiar */
902 /* > with the details of the method. */
903 /* > RWORK(6) = the entropy of A^* * A :: this is the Shannon entropy */
904 /* > of diag(A^* * A) / Trace(A^* * A) taken as point in the */
905 /* > probability simplex. */
906 /* > RWORK(7) = the entropy of A * A^*. (See the description of RWORK(6).) */
907 /* > If the call to ZGEJSV is a workspace query (indicated by LWORK=-1 or */
908 /* > LRWORK=-1), then on exit RWORK(1) contains the required length of */
909 /* > RWORK for the job parameters used in the call. */
912 /* > \param[in] LRWORK */
914 /* > LRWORK is INTEGER */
915 /* > Length of RWORK to confirm proper allocation of workspace. */
916 /* > LRWORK depends on the job: */
918 /* > 1. If only the singular values are requested i.e. if */
919 /* > LSAME(JOBU,'N') .AND. LSAME(JOBV,'N') */
921 /* > 1.1. If LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G'), */
922 /* > then: LRWORK = f2cmax( 7, 2 * M ). */
923 /* > 1.2. Otherwise, LRWORK = f2cmax( 7, N ). */
924 /* > 2. If singular values with the right singular vectors are requested */
926 /* > (LSAME(JOBV,'V').OR.LSAME(JOBV,'J')) .AND. */
927 /* > .NOT.(LSAME(JOBU,'U').OR.LSAME(JOBU,'F')) */
929 /* > 2.1. If LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G'), */
930 /* > then LRWORK = f2cmax( 7, 2 * M ). */
931 /* > 2.2. Otherwise, LRWORK = f2cmax( 7, N ). */
932 /* > 3. If singular values with the left singular vectors are requested, i.e. if */
933 /* > (LSAME(JOBU,'U').OR.LSAME(JOBU,'F')) .AND. */
934 /* > .NOT.(LSAME(JOBV,'V').OR.LSAME(JOBV,'J')) */
936 /* > 3.1. If LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G'), */
937 /* > then LRWORK = f2cmax( 7, 2 * M ). */
938 /* > 3.2. Otherwise, LRWORK = f2cmax( 7, N ). */
939 /* > 4. If singular values with both the left and the right singular vectors */
940 /* > are requested, i.e. if */
941 /* > (LSAME(JOBU,'U').OR.LSAME(JOBU,'F')) .AND. */
942 /* > (LSAME(JOBV,'V').OR.LSAME(JOBV,'J')) */
944 /* > 4.1. If LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G'), */
945 /* > then LRWORK = f2cmax( 7, 2 * M ). */
946 /* > 4.2. Otherwise, LRWORK = f2cmax( 7, N ). */
948 /* > If, on entry, LRWORK = -1 or LWORK=-1, a workspace query is assumed and */
949 /* > the length of RWORK is returned in RWORK(1). */
952 /* > \param[out] IWORK */
954 /* > IWORK is INTEGER array, of dimension at least 4, that further depends */
957 /* > 1. If only the singular values are requested then: */
958 /* > If ( LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G') ) */
959 /* > then the length of IWORK is N+M; otherwise the length of IWORK is N. */
960 /* > 2. If the singular values and the right singular vectors are requested then: */
961 /* > If ( LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G') ) */
962 /* > then the length of IWORK is N+M; otherwise the length of IWORK is N. */
963 /* > 3. If the singular values and the left singular vectors are requested then: */
964 /* > If ( LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G') ) */
965 /* > then the length of IWORK is N+M; otherwise the length of IWORK is N. */
966 /* > 4. If the singular values with both the left and the right singular vectors */
967 /* > are requested, then: */
968 /* > 4.1. If LSAME(JOBV,'J') the length of IWORK is determined as follows: */
969 /* > If ( LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G') ) */
970 /* > then the length of IWORK is N+M; otherwise the length of IWORK is N. */
971 /* > 4.2. If LSAME(JOBV,'V') the length of IWORK is determined as follows: */
972 /* > If ( LSAME(JOBT,'T') .OR. LSAME(JOBA,'F') .OR. LSAME(JOBA,'G') ) */
973 /* > then the length of IWORK is 2*N+M; otherwise the length of IWORK is 2*N. */
976 /* > IWORK(1) = the numerical rank determined after the initial */
977 /* > QR factorization with pivoting. See the descriptions */
978 /* > of JOBA and JOBR. */
979 /* > IWORK(2) = the number of the computed nonzero singular values */
980 /* > IWORK(3) = if nonzero, a warning message: */
981 /* > If IWORK(3) = 1 then some of the column norms of A */
982 /* > were denormalized floats. The requested high accuracy */
983 /* > is not warranted by the data. */
984 /* > IWORK(4) = 1 or -1. If IWORK(4) = 1, then the procedure used A^* to */
985 /* > do the job as specified by the JOB parameters. */
986 /* > If the call to ZGEJSV is a workspace query (indicated by LWORK = -1 or */
987 /* > LRWORK = -1), then on exit IWORK(1) contains the required length of */
988 /* > IWORK for the job parameters used in the call. */
991 /* > \param[out] INFO */
993 /* > INFO is INTEGER */
994 /* > < 0: if INFO = -i, then the i-th argument had an illegal value. */
995 /* > = 0: successful exit; */
996 /* > > 0: ZGEJSV did not converge in the maximal allowed number */
997 /* > of sweeps. The computed values may be inaccurate. */
1003 /* > \author Univ. of Tennessee */
1004 /* > \author Univ. of California Berkeley */
1005 /* > \author Univ. of Colorado Denver */
1006 /* > \author NAG Ltd. */
1008 /* > \date June 2016 */
1010 /* > \ingroup complex16GEsing */
1012 /* > \par Further Details: */
1013 /* ===================== */
1017 /* > ZGEJSV implements a preconditioned Jacobi SVD algorithm. It uses ZGEQP3, */
1018 /* > ZGEQRF, and ZGELQF as preprocessors and preconditioners. Optionally, an */
1019 /* > additional row pivoting can be used as a preprocessor, which in some */
1020 /* > cases results in much higher accuracy. An example is matrix A with the */
1021 /* > structure A = D1 * C * D2, where D1, D2 are arbitrarily ill-conditioned */
1022 /* > diagonal matrices and C is well-conditioned matrix. In that case, complete */
1023 /* > pivoting in the first QR factorizations provides accuracy dependent on the */
1024 /* > condition number of C, and independent of D1, D2. Such higher accuracy is */
1025 /* > not completely understood theoretically, but it works well in practice. */
1026 /* > Further, if A can be written as A = B*D, with well-conditioned B and some */
1027 /* > diagonal D, then the high accuracy is guaranteed, both theoretically and */
1028 /* > in software, independent of D. For more details see [1], [2]. */
1029 /* > The computational range for the singular values can be the full range */
1030 /* > ( UNDERFLOW,OVERFLOW ), provided that the machine arithmetic and the BLAS */
1031 /* > & LAPACK routines called by ZGEJSV are implemented to work in that range. */
1032 /* > If that is not the case, then the restriction for safe computation with */
1033 /* > the singular values in the range of normalized IEEE numbers is that the */
1034 /* > spectral condition number kappa(A)=sigma_max(A)/sigma_min(A) does not */
1035 /* > overflow. This code (ZGEJSV) is best used in this restricted range, */
1036 /* > meaning that singular values of magnitude below ||A||_2 / DLAMCH('O') are */
1037 /* > returned as zeros. See JOBR for details on this. */
1038 /* > Further, this implementation is somewhat slower than the one described */
1039 /* > in [1,2] due to replacement of some non-LAPACK components, and because */
1040 /* > the choice of some tuning parameters in the iterative part (ZGESVJ) is */
1041 /* > left to the implementer on a particular machine. */
1042 /* > The rank revealing QR factorization (in this code: ZGEQP3) should be */
1043 /* > implemented as in [3]. We have a new version of ZGEQP3 under development */
1044 /* > that is more robust than the current one in LAPACK, with a cleaner cut in */
1045 /* > rank deficient cases. It will be available in the SIGMA library [4]. */
1046 /* > If M is much larger than N, it is obvious that the initial QRF with */
1047 /* > column pivoting can be preprocessed by the QRF without pivoting. That */
1048 /* > well known trick is not used in ZGEJSV because in some cases heavy row */
1049 /* > weighting can be treated with complete pivoting. The overhead in cases */
1050 /* > M much larger than N is then only due to pivoting, but the benefits in */
1051 /* > terms of accuracy have prevailed. The implementer/user can incorporate */
1052 /* > this extra QRF step easily. The implementer can also improve data movement */
1053 /* > (matrix transpose, matrix copy, matrix transposed copy) - this */
1054 /* > implementation of ZGEJSV uses only the simplest, naive data movement. */
1055 /* > \endverbatim */
1057 /* > \par Contributor: */
1058 /* ================== */
1060 /* > Zlatko Drmac, Department of Mathematics, Faculty of Science, */
1061 /* > University of Zagreb (Zagreb, Croatia); drmac@math.hr */
1063 /* > \par References: */
1064 /* ================ */
1068 /* > [1] Z. Drmac and K. Veselic: New fast and accurate Jacobi SVD algorithm I. */
1069 /* > SIAM J. Matrix Anal. Appl. Vol. 29, No. 4 (2008), pp. 1322-1342. */
1070 /* > LAPACK Working note 169. */
1071 /* > [2] Z. Drmac and K. Veselic: New fast and accurate Jacobi SVD algorithm II. */
1072 /* > SIAM J. Matrix Anal. Appl. Vol. 29, No. 4 (2008), pp. 1343-1362. */
1073 /* > LAPACK Working note 170. */
1074 /* > [3] Z. Drmac and Z. Bujanovic: On the failure of rank-revealing QR */
1075 /* > factorization software - a case study. */
1076 /* > ACM Trans. Math. Softw. Vol. 35, No. 2 (2008), pp. 1-28. */
1077 /* > LAPACK Working note 176. */
1078 /* > [4] Z. Drmac: SIGMA - mathematical software library for accurate SVD, PSV, */
1079 /* > QSVD, (H,K)-SVD computations. */
1080 /* > Department of Mathematics, University of Zagreb, 2008, 2016. */
1081 /* > \endverbatim */
1083 /* > \par Bugs, examples and comments: */
1084 /* ================================= */
1086 /* > Please report all bugs and send interesting examples and/or comments to */
1087 /* > drmac@math.hr. Thank you. */
1089 /* ===================================================================== */
1090 /* Subroutine */ int zgejsv_(char *joba, char *jobu, char *jobv, char *jobr,
1091 char *jobt, char *jobp, integer *m, integer *n, doublecomplex *a,
1092 integer *lda, doublereal *sva, doublecomplex *u, integer *ldu,
1093 doublecomplex *v, integer *ldv, doublecomplex *cwork, integer *lwork,
1094 doublereal *rwork, integer *lrwork, integer *iwork, integer *info)
1096 /* System generated locals */
1097 integer a_dim1, a_offset, u_dim1, u_offset, v_dim1, v_offset, i__1, i__2,
1098 i__3, i__4, i__5, i__6, i__7, i__8, i__9, i__10, i__11;
1099 doublereal d__1, d__2, d__3;
1102 /* Local variables */
1103 integer lwrk_zgesvj__;
1105 doublereal aapp, aaqq;
1106 integer lwrk_zunmlq__, lwrk_zunmqr__;
1108 integer ierr, lwrk_zgeqp3n__;
1110 integer lwunmqrm, lwqp3, p, q;
1112 extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
1114 integer lwrk_zgesvju__, lwrk_zgesvjv__;
1115 extern logical lsame_(char *, char *);
1116 integer lwrk_zunmqrm__;
1117 doublecomplex ctemp;
1118 doublereal entra, small;
1124 integer lwcon, lwlqf, lwqrf, n1;
1125 extern /* Subroutine */ int zcopy_(integer *, doublecomplex *, integer *,
1126 doublecomplex *, integer *), zswap_(integer *, doublecomplex *,
1127 integer *, doublecomplex *, integer *);
1129 extern /* Subroutine */ int ztrsm_(char *, char *, char *, char *,
1130 integer *, integer *, doublecomplex *, doublecomplex *, integer *,
1131 doublecomplex *, integer *);
1132 doublereal condr1, condr2, uscal1, uscal2;
1133 logical l2kill, l2rank, l2tran, l2pert;
1134 extern /* Subroutine */ int zgeqp3_(integer *, integer *, doublecomplex *,
1135 integer *, integer *, doublecomplex *, doublecomplex *, integer *
1136 , doublereal *, integer *);
1137 extern doublereal dznrm2_(integer *, doublecomplex *, integer *);
1139 extern doublereal dlamch_(char *);
1141 extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
1142 doublereal *, doublereal *, integer *, integer *, doublereal *,
1143 integer *, integer *);
1144 extern integer idamax_(integer *, doublereal *, integer *);
1145 doublereal scalem, sconda;
1147 doublereal aatmin, aatmax;
1148 extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen);
1150 extern /* Subroutine */ int zdscal_(integer *, doublereal *,
1151 doublecomplex *, integer *), zlacgv_(integer *, doublecomplex *,
1152 integer *), dlassq_(integer *, doublereal *, integer *,
1153 doublereal *, doublereal *);
1154 extern integer izamax_(integer *, doublecomplex *, integer *);
1155 extern /* Subroutine */ int zgelqf_(integer *, integer *, doublecomplex *,
1156 integer *, doublecomplex *, doublecomplex *, integer *, integer *
1157 ), zlascl_(char *, integer *, integer *, doublereal *, doublereal
1158 *, integer *, integer *, doublecomplex *, integer *, integer *);
1161 doublecomplex cdummy[1];
1162 extern /* Subroutine */ int zgeqrf_(integer *, integer *, doublecomplex *,
1163 integer *, doublecomplex *, doublecomplex *, integer *, integer *
1166 extern /* Subroutine */ int zlacpy_(char *, integer *, integer *,
1167 doublecomplex *, integer *, doublecomplex *, integer *),
1168 zlaset_(char *, integer *, integer *, doublecomplex *,
1169 doublecomplex *, doublecomplex *, integer *);
1172 extern /* Subroutine */ int zlapmr_(logical *, integer *, integer *,
1173 doublecomplex *, integer *, integer *);
1175 integer minwrk, lwsvdj;
1176 extern /* Subroutine */ int zpocon_(char *, integer *, doublecomplex *,
1177 integer *, doublereal *, doublereal *, doublecomplex *,
1178 doublereal *, integer *), zgesvj_(char *, char *, char *,
1179 integer *, integer *, doublecomplex *, integer *, doublereal *,
1180 integer *, doublecomplex *, integer *, doublecomplex *, integer *,
1181 doublereal *, integer *, integer *);
1182 doublereal rdummy[1];
1183 extern /* Subroutine */ int zlassq_(integer *, doublecomplex *, integer *,
1184 doublereal *, doublereal *);
1186 extern /* Subroutine */ int zlaswp_(integer *, doublecomplex *, integer *,
1187 integer *, integer *, integer *, integer *);
1190 extern /* Subroutine */ int zungqr_(integer *, integer *, integer *,
1191 doublecomplex *, integer *, doublecomplex *, doublecomplex *,
1192 integer *, integer *), zunmlq_(char *, char *, integer *, integer
1193 *, integer *, doublecomplex *, integer *, doublecomplex *,
1194 doublecomplex *, integer *, doublecomplex *, integer *, integer *), zunmqr_(char *, char *, integer *, integer *,
1195 integer *, doublecomplex *, integer *, doublecomplex *,
1196 doublecomplex *, integer *, doublecomplex *, integer *, integer *);
1197 doublereal big, cond_ok__, xsc;
1198 integer lwrk_zgeqp3__;
1200 integer warning, numrank, miniwrk, minrwrk, lrwsvdj, lwunmlq, lwsvdjv,
1201 lwunmqr, lwrk_zgelqf__, lwrk_zgeqrf__;
1204 /* -- LAPACK computational routine (version 3.7.1) -- */
1205 /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
1206 /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
1210 /* =========================================================================== */
1216 /* Test the input arguments */
1218 /* Parameter adjustments */
1221 a_offset = 1 + a_dim1 * 1;
1224 u_offset = 1 + u_dim1 * 1;
1227 v_offset = 1 + v_dim1 * 1;
1234 lsvec = lsame_(jobu, "U") || lsame_(jobu, "F");
1235 jracc = lsame_(jobv, "J");
1236 rsvec = lsame_(jobv, "V") || jracc;
1237 rowpiv = lsame_(joba, "F") || lsame_(joba, "G");
1238 l2rank = lsame_(joba, "R");
1239 l2aber = lsame_(joba, "A");
1240 errest = lsame_(joba, "E") || lsame_(joba, "G");
1241 l2tran = lsame_(jobt, "T") && *m == *n;
1242 l2kill = lsame_(jobr, "R");
1243 defr = lsame_(jobr, "N");
1244 l2pert = lsame_(jobp, "P");
1246 lquery = *lwork == -1 || *lrwork == -1;
1248 if (! (rowpiv || l2rank || l2aber || errest || lsame_(joba, "C"))) {
1250 } else if (! (lsvec || lsame_(jobu, "N") || lsame_(
1251 jobu, "W") && rsvec && l2tran)) {
1253 } else if (! (rsvec || lsame_(jobv, "N") || lsame_(
1254 jobv, "W") && lsvec && l2tran)) {
1256 } else if (! (l2kill || defr)) {
1258 } else if (! (lsame_(jobt, "T") || lsame_(jobt,
1261 } else if (! (l2pert || lsame_(jobp, "N"))) {
1263 } else if (*m < 0) {
1265 } else if (*n < 0 || *n > *m) {
1267 } else if (*lda < *m) {
1269 } else if (lsvec && *ldu < *m) {
1271 } else if (rsvec && *ldv < *n) {
1279 /* [[The expressions for computing the minimal and the optimal */
1280 /* values of LCWORK, LRWORK are written with a lot of redundancy and */
1281 /* can be simplified. However, this verbose form is useful for */
1282 /* maintenance and modifications of the code.]] */
1284 /* ZGEQRF of an N x N matrix, ZGELQF of an N x N matrix, */
1285 /* ZUNMLQ for computing N x N matrix, ZUNMQR for computing N x N */
1286 /* matrix, ZUNMQR for computing M x N matrix, respectively. */
1288 lwqrf = f2cmax(1,*n);
1289 lwlqf = f2cmax(1,*n);
1290 lwunmlq = f2cmax(1,*n);
1291 lwunmqr = f2cmax(1,*n);
1292 lwunmqrm = f2cmax(1,*m);
1294 /* without and with explicit accumulation of Jacobi rotations */
1297 lwsvdj = f2cmax(i__1,1);
1300 lwsvdjv = f2cmax(i__1,1);
1305 zgeqp3_(m, n, &a[a_offset], lda, &iwork[1], cdummy, cdummy, &c_n1,
1307 lwrk_zgeqp3__ = (integer) cdummy[0].r;
1308 zgeqrf_(n, n, &a[a_offset], lda, cdummy, cdummy, &c_n1, &ierr);
1309 lwrk_zgeqrf__ = (integer) cdummy[0].r;
1310 zgelqf_(n, n, &a[a_offset], lda, cdummy, cdummy, &c_n1, &ierr);
1311 lwrk_zgelqf__ = (integer) cdummy[0].r;
1316 if (! (lsvec || rsvec)) {
1317 /* only the singular values are requested */
1320 /* Computing 2nd power */
1322 i__1 = *n + lwqp3, i__2 = i__3 * i__3 + lwcon, i__1 = f2cmax(
1323 i__1,i__2), i__2 = *n + lwqrf, i__1 = f2cmax(i__1,i__2);
1324 minwrk = f2cmax(i__1,lwsvdj);
1327 i__1 = *n + lwqp3, i__2 = *n + lwqrf, i__1 = f2cmax(i__1,i__2);
1328 minwrk = f2cmax(i__1,lwsvdj);
1331 zgesvj_("L", "N", "N", n, n, &a[a_offset], lda, &sva[1], n, &
1332 v[v_offset], ldv, cdummy, &c_n1, rdummy, &c_n1, &ierr);
1333 lwrk_zgesvj__ = (integer) cdummy[0].r;
1336 /* Computing 2nd power */
1338 i__1 = *n + lwrk_zgeqp3__, i__2 = i__3 * i__3 + lwcon,
1339 i__1 = f2cmax(i__1,i__2), i__2 = *n + lwrk_zgeqrf__,
1340 i__1 = f2cmax(i__1,i__2);
1341 optwrk = f2cmax(i__1,lwrk_zgesvj__);
1344 i__1 = *n + lwrk_zgeqp3__, i__2 = *n + lwrk_zgeqrf__,
1345 i__1 = f2cmax(i__1,i__2);
1346 optwrk = f2cmax(i__1,lwrk_zgesvj__);
1349 if (l2tran || rowpiv) {
1352 i__1 = 7, i__2 = *m << 1, i__1 = f2cmax(i__1,i__2), i__1 =
1353 f2cmax(i__1,lrwqp3), i__1 = f2cmax(i__1,lrwcon);
1354 minrwrk = f2cmax(i__1,lrwsvdj);
1357 i__1 = 7, i__2 = *m << 1, i__1 = f2cmax(i__1,i__2), i__1 =
1358 f2cmax(i__1,lrwqp3);
1359 minrwrk = f2cmax(i__1,lrwsvdj);
1364 i__1 = f2cmax(7,lrwqp3), i__1 = f2cmax(i__1,lrwcon);
1365 minrwrk = f2cmax(i__1,lrwsvdj);
1368 i__1 = f2cmax(7,lrwqp3);
1369 minrwrk = f2cmax(i__1,lrwsvdj);
1372 if (rowpiv || l2tran) {
1375 } else if (rsvec && ! lsvec) {
1376 /* singular values and the right singular vectors are requested */
1379 i__1 = *n + lwqp3, i__1 = f2cmax(i__1,lwcon), i__1 = f2cmax(i__1,
1380 lwsvdj), i__2 = *n + lwlqf, i__1 = f2cmax(i__1,i__2),
1381 i__2 = (*n << 1) + lwqrf, i__1 = f2cmax(i__1,i__2), i__2
1382 = *n + lwsvdj, i__1 = f2cmax(i__1,i__2), i__2 = *n +
1384 minwrk = f2cmax(i__1,i__2);
1387 i__1 = *n + lwqp3, i__1 = f2cmax(i__1,lwsvdj), i__2 = *n + lwlqf,
1388 i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + lwqrf,
1389 i__1 = f2cmax(i__1,i__2), i__2 = *n + lwsvdj, i__1 = f2cmax(
1390 i__1,i__2), i__2 = *n + lwunmlq;
1391 minwrk = f2cmax(i__1,i__2);
1394 zgesvj_("L", "U", "N", n, n, &u[u_offset], ldu, &sva[1], n, &
1395 a[a_offset], lda, cdummy, &c_n1, rdummy, &c_n1, &ierr);
1396 lwrk_zgesvj__ = (integer) cdummy[0].r;
1397 zunmlq_("L", "C", n, n, n, &a[a_offset], lda, cdummy, &v[
1398 v_offset], ldv, cdummy, &c_n1, &ierr);
1399 lwrk_zunmlq__ = (integer) cdummy[0].r;
1402 i__1 = *n + lwrk_zgeqp3__, i__1 = f2cmax(i__1,lwcon), i__1 =
1403 f2cmax(i__1,lwrk_zgesvj__), i__2 = *n +
1404 lwrk_zgelqf__, i__1 = f2cmax(i__1,i__2), i__2 = (*n
1405 << 1) + lwrk_zgeqrf__, i__1 = f2cmax(i__1,i__2),
1406 i__2 = *n + lwrk_zgesvj__, i__1 = f2cmax(i__1,i__2),
1407 i__2 = *n + lwrk_zunmlq__;
1408 optwrk = f2cmax(i__1,i__2);
1411 i__1 = *n + lwrk_zgeqp3__, i__1 = f2cmax(i__1,lwrk_zgesvj__),
1412 i__2 = *n + lwrk_zgelqf__, i__1 = f2cmax(i__1,i__2),
1413 i__2 = (*n << 1) + lwrk_zgeqrf__, i__1 = f2cmax(
1414 i__1,i__2), i__2 = *n + lwrk_zgesvj__, i__1 = f2cmax(
1415 i__1,i__2), i__2 = *n + lwrk_zunmlq__;
1416 optwrk = f2cmax(i__1,i__2);
1419 if (l2tran || rowpiv) {
1422 i__1 = 7, i__2 = *m << 1, i__1 = f2cmax(i__1,i__2), i__1 =
1423 f2cmax(i__1,lrwqp3), i__1 = f2cmax(i__1,lrwsvdj);
1424 minrwrk = f2cmax(i__1,lrwcon);
1427 i__1 = 7, i__2 = *m << 1, i__1 = f2cmax(i__1,i__2), i__1 =
1428 f2cmax(i__1,lrwqp3);
1429 minrwrk = f2cmax(i__1,lrwsvdj);
1434 i__1 = f2cmax(7,lrwqp3), i__1 = f2cmax(i__1,lrwsvdj);
1435 minrwrk = f2cmax(i__1,lrwcon);
1438 i__1 = f2cmax(7,lrwqp3);
1439 minrwrk = f2cmax(i__1,lrwsvdj);
1442 if (rowpiv || l2tran) {
1445 } else if (lsvec && ! rsvec) {
1446 /* singular values and the left singular vectors are requested */
1449 i__1 = f2cmax(lwqp3,lwcon), i__2 = *n + lwqrf, i__1 = f2cmax(i__1,
1450 i__2), i__1 = f2cmax(i__1,lwsvdj);
1451 minwrk = *n + f2cmax(i__1,lwunmqrm);
1454 i__1 = lwqp3, i__2 = *n + lwqrf, i__1 = f2cmax(i__1,i__2), i__1 =
1455 f2cmax(i__1,lwsvdj);
1456 minwrk = *n + f2cmax(i__1,lwunmqrm);
1459 zgesvj_("L", "U", "N", n, n, &u[u_offset], ldu, &sva[1], n, &
1460 a[a_offset], lda, cdummy, &c_n1, rdummy, &c_n1, &ierr);
1461 lwrk_zgesvj__ = (integer) cdummy[0].r;
1462 zunmqr_("L", "N", m, n, n, &a[a_offset], lda, cdummy, &u[
1463 u_offset], ldu, cdummy, &c_n1, &ierr);
1464 lwrk_zunmqrm__ = (integer) cdummy[0].r;
1467 i__1 = f2cmax(lwrk_zgeqp3__,lwcon), i__2 = *n +
1468 lwrk_zgeqrf__, i__1 = f2cmax(i__1,i__2), i__1 = f2cmax(
1469 i__1,lwrk_zgesvj__);
1470 optwrk = *n + f2cmax(i__1,lwrk_zunmqrm__);
1473 i__1 = lwrk_zgeqp3__, i__2 = *n + lwrk_zgeqrf__, i__1 =
1474 f2cmax(i__1,i__2), i__1 = f2cmax(i__1,lwrk_zgesvj__);
1475 optwrk = *n + f2cmax(i__1,lwrk_zunmqrm__);
1478 if (l2tran || rowpiv) {
1481 i__1 = 7, i__2 = *m << 1, i__1 = f2cmax(i__1,i__2), i__1 =
1482 f2cmax(i__1,lrwqp3), i__1 = f2cmax(i__1,lrwsvdj);
1483 minrwrk = f2cmax(i__1,lrwcon);
1486 i__1 = 7, i__2 = *m << 1, i__1 = f2cmax(i__1,i__2), i__1 =
1487 f2cmax(i__1,lrwqp3);
1488 minrwrk = f2cmax(i__1,lrwsvdj);
1493 i__1 = f2cmax(7,lrwqp3), i__1 = f2cmax(i__1,lrwsvdj);
1494 minrwrk = f2cmax(i__1,lrwcon);
1497 i__1 = f2cmax(7,lrwqp3);
1498 minrwrk = f2cmax(i__1,lrwsvdj);
1501 if (rowpiv || l2tran) {
1505 /* full SVD is requested */
1509 /* Computing 2nd power */
1511 /* Computing 2nd power */
1513 /* Computing 2nd power */
1515 /* Computing 2nd power */
1517 /* Computing 2nd power */
1519 /* Computing 2nd power */
1521 /* Computing 2nd power */
1523 /* Computing 2nd power */
1525 /* Computing 2nd power */
1527 i__1 = *n + lwqp3, i__2 = *n + lwcon, i__1 = f2cmax(i__1,
1528 i__2), i__2 = (*n << 1) + i__3 * i__3 + lwcon,
1529 i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + lwqrf,
1530 i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + lwqp3,
1531 i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + i__4 *
1532 i__4 + *n + lwlqf, i__1 = f2cmax(i__1,i__2), i__2 = (
1533 *n << 1) + i__5 * i__5 + *n + i__6 * i__6 + lwcon,
1534 i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + i__7 *
1535 i__7 + *n + lwsvdj, i__1 = f2cmax(i__1,i__2), i__2 =
1536 (*n << 1) + i__8 * i__8 + *n + lwsvdjv, i__1 =
1537 f2cmax(i__1,i__2), i__2 = (*n << 1) + i__9 * i__9 + *
1538 n + lwunmqr, i__1 = f2cmax(i__1,i__2), i__2 = (*n <<
1539 1) + i__10 * i__10 + *n + lwunmlq, i__1 = f2cmax(
1540 i__1,i__2), i__2 = *n + i__11 * i__11 + lwsvdj,
1541 i__1 = f2cmax(i__1,i__2), i__2 = *n + lwunmqrm;
1542 minwrk = f2cmax(i__1,i__2);
1545 /* Computing 2nd power */
1547 /* Computing 2nd power */
1549 /* Computing 2nd power */
1551 /* Computing 2nd power */
1553 /* Computing 2nd power */
1555 /* Computing 2nd power */
1557 /* Computing 2nd power */
1559 /* Computing 2nd power */
1561 /* Computing 2nd power */
1563 i__1 = *n + lwqp3, i__2 = (*n << 1) + i__3 * i__3 + lwcon,
1564 i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + lwqrf,
1565 i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + lwqp3,
1566 i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + i__4 *
1567 i__4 + *n + lwlqf, i__1 = f2cmax(i__1,i__2), i__2 = (
1568 *n << 1) + i__5 * i__5 + *n + i__6 * i__6 + lwcon,
1569 i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + i__7 *
1570 i__7 + *n + lwsvdj, i__1 = f2cmax(i__1,i__2), i__2 =
1571 (*n << 1) + i__8 * i__8 + *n + lwsvdjv, i__1 =
1572 f2cmax(i__1,i__2), i__2 = (*n << 1) + i__9 * i__9 + *
1573 n + lwunmqr, i__1 = f2cmax(i__1,i__2), i__2 = (*n <<
1574 1) + i__10 * i__10 + *n + lwunmlq, i__1 = f2cmax(
1575 i__1,i__2), i__2 = *n + i__11 * i__11 + lwsvdj,
1576 i__1 = f2cmax(i__1,i__2), i__2 = *n + lwunmqrm;
1577 minwrk = f2cmax(i__1,i__2);
1580 if (rowpiv || l2tran) {
1586 /* Computing 2nd power */
1588 /* Computing 2nd power */
1590 i__1 = *n + lwqp3, i__2 = *n + lwcon, i__1 = f2cmax(i__1,
1591 i__2), i__2 = (*n << 1) + lwqrf, i__1 = f2cmax(i__1,
1592 i__2), i__2 = (*n << 1) + i__3 * i__3 + lwsvdjv,
1593 i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) + i__4 *
1594 i__4 + *n + lwunmqr, i__1 = f2cmax(i__1,i__2), i__2 =
1596 minwrk = f2cmax(i__1,i__2);
1599 /* Computing 2nd power */
1601 /* Computing 2nd power */
1603 i__1 = *n + lwqp3, i__2 = (*n << 1) + lwqrf, i__1 = f2cmax(
1604 i__1,i__2), i__2 = (*n << 1) + i__3 * i__3 +
1605 lwsvdjv, i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1)
1606 + i__4 * i__4 + *n + lwunmqr, i__1 = f2cmax(i__1,
1607 i__2), i__2 = *n + lwunmqrm;
1608 minwrk = f2cmax(i__1,i__2);
1610 if (rowpiv || l2tran) {
1615 zunmqr_("L", "N", m, n, n, &a[a_offset], lda, cdummy, &u[
1616 u_offset], ldu, cdummy, &c_n1, &ierr);
1617 lwrk_zunmqrm__ = (integer) cdummy[0].r;
1618 zunmqr_("L", "N", n, n, n, &a[a_offset], lda, cdummy, &u[
1619 u_offset], ldu, cdummy, &c_n1, &ierr);
1620 lwrk_zunmqr__ = (integer) cdummy[0].r;
1622 zgeqp3_(n, n, &a[a_offset], lda, &iwork[1], cdummy,
1623 cdummy, &c_n1, rdummy, &ierr);
1624 lwrk_zgeqp3n__ = (integer) cdummy[0].r;
1625 zgesvj_("L", "U", "N", n, n, &u[u_offset], ldu, &sva[1],
1626 n, &v[v_offset], ldv, cdummy, &c_n1, rdummy, &
1628 lwrk_zgesvj__ = (integer) cdummy[0].r;
1629 zgesvj_("U", "U", "N", n, n, &u[u_offset], ldu, &sva[1],
1630 n, &v[v_offset], ldv, cdummy, &c_n1, rdummy, &
1632 lwrk_zgesvju__ = (integer) cdummy[0].r;
1633 zgesvj_("L", "U", "V", n, n, &u[u_offset], ldu, &sva[1],
1634 n, &v[v_offset], ldv, cdummy, &c_n1, rdummy, &
1636 lwrk_zgesvjv__ = (integer) cdummy[0].r;
1637 zunmlq_("L", "C", n, n, n, &a[a_offset], lda, cdummy, &v[
1638 v_offset], ldv, cdummy, &c_n1, &ierr);
1639 lwrk_zunmlq__ = (integer) cdummy[0].r;
1642 /* Computing 2nd power */
1644 /* Computing 2nd power */
1646 /* Computing 2nd power */
1648 /* Computing 2nd power */
1650 /* Computing 2nd power */
1652 /* Computing 2nd power */
1654 /* Computing 2nd power */
1656 /* Computing 2nd power */
1658 /* Computing 2nd power */
1660 i__1 = *n + lwrk_zgeqp3__, i__2 = *n + lwcon, i__1 =
1661 f2cmax(i__1,i__2), i__2 = (*n << 1) + i__3 *
1662 i__3 + lwcon, i__1 = f2cmax(i__1,i__2), i__2 = (*
1663 n << 1) + lwrk_zgeqrf__, i__1 = f2cmax(i__1,i__2)
1664 , i__2 = (*n << 1) + lwrk_zgeqp3n__, i__1 =
1665 f2cmax(i__1,i__2), i__2 = (*n << 1) + i__4 *
1666 i__4 + *n + lwrk_zgelqf__, i__1 = f2cmax(i__1,
1667 i__2), i__2 = (*n << 1) + i__5 * i__5 + *n +
1668 i__6 * i__6 + lwcon, i__1 = f2cmax(i__1,i__2),
1669 i__2 = (*n << 1) + i__7 * i__7 + *n +
1670 lwrk_zgesvj__, i__1 = f2cmax(i__1,i__2), i__2 = (
1671 *n << 1) + i__8 * i__8 + *n + lwrk_zgesvjv__,
1672 i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) +
1673 i__9 * i__9 + *n + lwrk_zunmqr__, i__1 = f2cmax(
1674 i__1,i__2), i__2 = (*n << 1) + i__10 * i__10
1675 + *n + lwrk_zunmlq__, i__1 = f2cmax(i__1,i__2),
1676 i__2 = *n + i__11 * i__11 + lwrk_zgesvju__,
1677 i__1 = f2cmax(i__1,i__2), i__2 = *n +
1679 optwrk = f2cmax(i__1,i__2);
1682 /* Computing 2nd power */
1684 /* Computing 2nd power */
1686 /* Computing 2nd power */
1688 /* Computing 2nd power */
1690 /* Computing 2nd power */
1692 /* Computing 2nd power */
1694 /* Computing 2nd power */
1696 /* Computing 2nd power */
1698 /* Computing 2nd power */
1700 i__1 = *n + lwrk_zgeqp3__, i__2 = (*n << 1) + i__3 *
1701 i__3 + lwcon, i__1 = f2cmax(i__1,i__2), i__2 = (*
1702 n << 1) + lwrk_zgeqrf__, i__1 = f2cmax(i__1,i__2)
1703 , i__2 = (*n << 1) + lwrk_zgeqp3n__, i__1 =
1704 f2cmax(i__1,i__2), i__2 = (*n << 1) + i__4 *
1705 i__4 + *n + lwrk_zgelqf__, i__1 = f2cmax(i__1,
1706 i__2), i__2 = (*n << 1) + i__5 * i__5 + *n +
1707 i__6 * i__6 + lwcon, i__1 = f2cmax(i__1,i__2),
1708 i__2 = (*n << 1) + i__7 * i__7 + *n +
1709 lwrk_zgesvj__, i__1 = f2cmax(i__1,i__2), i__2 = (
1710 *n << 1) + i__8 * i__8 + *n + lwrk_zgesvjv__,
1711 i__1 = f2cmax(i__1,i__2), i__2 = (*n << 1) +
1712 i__9 * i__9 + *n + lwrk_zunmqr__, i__1 = f2cmax(
1713 i__1,i__2), i__2 = (*n << 1) + i__10 * i__10
1714 + *n + lwrk_zunmlq__, i__1 = f2cmax(i__1,i__2),
1715 i__2 = *n + i__11 * i__11 + lwrk_zgesvju__,
1716 i__1 = f2cmax(i__1,i__2), i__2 = *n +
1718 optwrk = f2cmax(i__1,i__2);
1721 zgesvj_("L", "U", "V", n, n, &u[u_offset], ldu, &sva[1],
1722 n, &v[v_offset], ldv, cdummy, &c_n1, rdummy, &
1724 lwrk_zgesvjv__ = (integer) cdummy[0].r;
1725 zunmqr_("L", "N", n, n, n, cdummy, n, cdummy, &v[v_offset]
1726 , ldv, cdummy, &c_n1, &ierr)
1728 lwrk_zunmqr__ = (integer) cdummy[0].r;
1729 zunmqr_("L", "N", m, n, n, &a[a_offset], lda, cdummy, &u[
1730 u_offset], ldu, cdummy, &c_n1, &ierr);
1731 lwrk_zunmqrm__ = (integer) cdummy[0].r;
1734 /* Computing 2nd power */
1736 /* Computing 2nd power */
1738 /* Computing 2nd power */
1740 i__1 = *n + lwrk_zgeqp3__, i__2 = *n + lwcon, i__1 =
1741 f2cmax(i__1,i__2), i__2 = (*n << 1) +
1742 lwrk_zgeqrf__, i__1 = f2cmax(i__1,i__2), i__2 = (
1743 *n << 1) + i__3 * i__3, i__1 = f2cmax(i__1,i__2),
1744 i__2 = (*n << 1) + i__4 * i__4 +
1745 lwrk_zgesvjv__, i__1 = f2cmax(i__1,i__2), i__2 =
1746 (*n << 1) + i__5 * i__5 + *n + lwrk_zunmqr__,
1747 i__1 = f2cmax(i__1,i__2), i__2 = *n +
1749 optwrk = f2cmax(i__1,i__2);
1752 /* Computing 2nd power */
1754 /* Computing 2nd power */
1756 /* Computing 2nd power */
1758 i__1 = *n + lwrk_zgeqp3__, i__2 = (*n << 1) +
1759 lwrk_zgeqrf__, i__1 = f2cmax(i__1,i__2), i__2 = (
1760 *n << 1) + i__3 * i__3, i__1 = f2cmax(i__1,i__2),
1761 i__2 = (*n << 1) + i__4 * i__4 +
1762 lwrk_zgesvjv__, i__1 = f2cmax(i__1,i__2), i__2 =
1763 (*n << 1) + i__5 * i__5 + *n + lwrk_zunmqr__,
1764 i__1 = f2cmax(i__1,i__2), i__2 = *n +
1766 optwrk = f2cmax(i__1,i__2);
1770 if (l2tran || rowpiv) {
1772 i__1 = 7, i__2 = *m << 1, i__1 = f2cmax(i__1,i__2), i__1 = f2cmax(
1773 i__1,lrwqp3), i__1 = f2cmax(i__1,lrwsvdj);
1774 minrwrk = f2cmax(i__1,lrwcon);
1777 i__1 = f2cmax(7,lrwqp3), i__1 = f2cmax(i__1,lrwsvdj);
1778 minrwrk = f2cmax(i__1,lrwcon);
1781 minwrk = f2cmax(2,minwrk);
1782 optwrk = f2cmax(minwrk,optwrk);
1783 if (*lwork < minwrk && ! lquery) {
1786 if (*lrwork < minrwrk && ! lquery) {
1794 xerbla_("ZGEJSV", &i__1, (ftnlen)6);
1796 } else if (lquery) {
1797 cwork[1].r = (doublereal) optwrk, cwork[1].i = 0.;
1798 cwork[2].r = (doublereal) minwrk, cwork[2].i = 0.;
1799 rwork[1] = (doublereal) minrwrk;
1800 iwork[1] = f2cmax(4,miniwrk);
1804 /* Quick return for void matrix (Y3K safe) */
1806 if (*m == 0 || *n == 0) {
1821 /* Determine whether the matrix U should be M x N or M x M */
1825 if (lsame_(jobu, "F")) {
1830 /* Set numerical parameters */
1832 /* ! NOTE: Make sure DLAMCH() does not fail on the target architecture. */
1834 epsln = dlamch_("Epsilon");
1835 sfmin = dlamch_("SafeMinimum");
1836 small = sfmin / epsln;
1838 /* BIG = ONE / SFMIN */
1840 /* Initialize SVA(1:N) = diag( ||A e_i||_2 )_1^N */
1842 /* (!) If necessary, scale SVA() to protect the largest norm from */
1843 /* overflow. It is possible that this scaling pushes the smallest */
1844 /* column norm left from the underflow threshold (extreme case). */
1846 scalem = 1. / sqrt((doublereal) (*m) * (doublereal) (*n));
1850 for (p = 1; p <= i__1; ++p) {
1853 zlassq_(m, &a[p * a_dim1 + 1], &c__1, &aapp, &aaqq);
1857 xerbla_("ZGEJSV", &i__2, (ftnlen)6);
1861 if (aapp < big / aaqq && noscal) {
1862 sva[p] = aapp * aaqq;
1865 sva[p] = aapp * (aaqq * scalem);
1869 dscal_(&i__2, &scalem, &sva[1], &c__1);
1882 for (p = 1; p <= i__1; ++p) {
1884 d__1 = aapp, d__2 = sva[p];
1885 aapp = f2cmax(d__1,d__2);
1888 d__1 = aaqq, d__2 = sva[p];
1889 aaqq = f2cmin(d__1,d__2);
1894 /* Quick return for zero M x N matrix */
1898 zlaset_("G", m, &n1, &c_b1, &c_b2, &u[u_offset], ldu);
1901 zlaset_("G", n, n, &c_b1, &c_b2, &v[v_offset], ldv);
1908 if (lsvec && rsvec) {
1923 /* Issue warning if denormalized column norms detected. Override the */
1924 /* high relative accuracy request. Issue licence to kill nonzero columns */
1925 /* (set them to zero) whose norm is less than sigma_max / BIG (roughly). */
1928 if (aaqq <= sfmin) {
1934 /* Quick return for one-column matrix */
1939 zlascl_("G", &c__0, &c__0, &sva[1], &scalem, m, &c__1, &a[a_dim1
1941 zlacpy_("A", m, &c__1, &a[a_offset], lda, &u[u_offset], ldu);
1942 /* computing all M left singular vectors of the M x 1 matrix */
1945 zgeqrf_(m, n, &u[u_offset], ldu, &cwork[1], &cwork[*n + 1], &
1948 zungqr_(m, &n1, &c__1, &u[u_offset], ldu, &cwork[1], &cwork[*
1949 n + 1], &i__1, &ierr);
1950 zcopy_(m, &a[a_dim1 + 1], &c__1, &u[u_dim1 + 1], &c__1);
1955 v[i__1].r = 1., v[i__1].i = 0.;
1957 if (sva[1] < big * scalem) {
1961 rwork[1] = 1. / scalem;
1965 if (sva[1] / scalem >= sfmin) {
1979 if (lsvec && rsvec) {
1995 if (rowpiv || l2tran) {
1997 /* Compute the row norms, needed to determine row pivoting sequence */
1998 /* (in the case of heavily row weighted A, row pivoting is strongly */
1999 /* advised) and to collect information needed to compare the */
2000 /* structures of A * A^* and A^* * A (in the case L2TRAN.EQ..TRUE.). */
2004 for (p = 1; p <= i__1; ++p) {
2007 zlassq_(n, &a[p + a_dim1], lda, &xsc, &temp1);
2008 /* ZLASSQ gets both the ell_2 and the ell_infinity norm */
2009 /* in one pass through the vector */
2010 rwork[*m + p] = xsc * scalem;
2011 rwork[p] = xsc * (scalem * sqrt(temp1));
2013 d__1 = aatmax, d__2 = rwork[p];
2014 aatmax = f2cmax(d__1,d__2);
2015 if (rwork[p] != 0.) {
2017 d__1 = aatmin, d__2 = rwork[p];
2018 aatmin = f2cmin(d__1,d__2);
2024 for (p = 1; p <= i__1; ++p) {
2025 rwork[*m + p] = scalem * z_abs(&a[p + izamax_(n, &a[p +
2026 a_dim1], lda) * a_dim1]);
2028 d__1 = aatmax, d__2 = rwork[*m + p];
2029 aatmax = f2cmax(d__1,d__2);
2031 d__1 = aatmin, d__2 = rwork[*m + p];
2032 aatmin = f2cmin(d__1,d__2);
2039 /* For square matrix A try to determine whether A^* would be better */
2040 /* input for the preconditioned Jacobi SVD, with faster convergence. */
2041 /* The decision is based on an O(N) function of the vector of column */
2042 /* and row norms of A, based on the Shannon entropy. This should give */
2043 /* the right choice in most cases when the difference actually matters. */
2044 /* It may fail and pick the slower converging side. */
2052 dlassq_(n, &sva[1], &c__1, &xsc, &temp1);
2057 for (p = 1; p <= i__1; ++p) {
2058 /* Computing 2nd power */
2059 d__1 = sva[p] / xsc;
2060 big1 = d__1 * d__1 * temp1;
2062 entra += big1 * log(big1);
2066 entra = -entra / log((doublereal) (*n));
2068 /* Now, SVA().^2/Trace(A^* * A) is a point in the probability simplex. */
2069 /* It is derived from the diagonal of A^* * A. Do the same with the */
2070 /* diagonal of A * A^*, compute the entropy of the corresponding */
2071 /* probability distribution. Note that A * A^* and A^* * A have the */
2076 for (p = 1; p <= i__1; ++p) {
2077 /* Computing 2nd power */
2078 d__1 = rwork[p] / xsc;
2079 big1 = d__1 * d__1 * temp1;
2081 entrat += big1 * log(big1);
2085 entrat = -entrat / log((doublereal) (*m));
2087 /* Analyze the entropies and decide A or A^*. Smaller entropy */
2088 /* usually means better input for the algorithm. */
2090 transp = entrat < entra;
2092 /* If A^* is better than A, take the adjoint of A. This is allowed */
2093 /* only for square matrices, M=N. */
2095 /* In an optimal implementation, this trivial transpose */
2096 /* should be replaced with faster transpose. */
2098 for (p = 1; p <= i__1; ++p) {
2099 i__2 = p + p * a_dim1;
2100 d_cnjg(&z__1, &a[p + p * a_dim1]);
2101 a[i__2].r = z__1.r, a[i__2].i = z__1.i;
2103 for (q = p + 1; q <= i__2; ++q) {
2104 d_cnjg(&z__1, &a[q + p * a_dim1]);
2105 ctemp.r = z__1.r, ctemp.i = z__1.i;
2106 i__3 = q + p * a_dim1;
2107 d_cnjg(&z__1, &a[p + q * a_dim1]);
2108 a[i__3].r = z__1.r, a[i__3].i = z__1.i;
2109 i__3 = p + q * a_dim1;
2110 a[i__3].r = ctemp.r, a[i__3].i = ctemp.i;
2115 i__1 = *n + *n * a_dim1;
2116 d_cnjg(&z__1, &a[*n + *n * a_dim1]);
2117 a[i__1].r = z__1.r, a[i__1].i = z__1.i;
2119 for (p = 1; p <= i__1; ++p) {
2120 rwork[*m + p] = sva[p];
2122 /* previously computed row 2-norms are now column 2-norms */
2123 /* of the transposed matrix */
2145 /* Scale the matrix so that its maximal singular value remains less */
2146 /* than SQRT(BIG) -- the matrix is scaled so that its maximal column */
2147 /* has Euclidean norm equal to SQRT(BIG/N). The only reason to keep */
2148 /* SQRT(BIG) instead of BIG is the fact that ZGEJSV uses LAPACK and */
2149 /* BLAS routines that, in some implementations, are not capable of */
2150 /* working in the full interval [SFMIN,BIG] and that they may provoke */
2151 /* overflows in the intermediate results. If the singular values spread */
2152 /* from SFMIN to BIG, then ZGESVJ will compute them. So, in that case, */
2153 /* one should use ZGESVJ instead of ZGEJSV. */
2154 /* >> change in the April 2016 update: allow bigger range, i.e. the */
2155 /* largest column is allowed up to BIG/N and ZGESVJ will do the rest. */
2157 temp1 = sqrt(big / (doublereal) (*n));
2158 /* TEMP1 = BIG/DBLE(N) */
2160 dlascl_("G", &c__0, &c__0, &aapp, &temp1, n, &c__1, &sva[1], n, &ierr);
2161 if (aaqq > aapp * sfmin) {
2162 aaqq = aaqq / aapp * temp1;
2164 aaqq = aaqq * temp1 / aapp;
2167 zlascl_("G", &c__0, &c__0, &aapp, &temp1, m, n, &a[a_offset], lda, &ierr);
2169 /* To undo scaling at the end of this procedure, multiply the */
2170 /* computed singular values with USCAL2 / USCAL1. */
2176 /* L2KILL enforces computation of nonzero singular values in */
2177 /* the restricted range of condition number of the initial A, */
2178 /* sigma_max(A) / sigma_min(A) approx. SQRT(BIG)/SQRT(SFMIN). */
2183 /* Now, if the condition number of A is too big, */
2184 /* sigma_max(A) / sigma_min(A) .GT. SQRT(BIG/N) * EPSLN / SFMIN, */
2185 /* as a precaution measure, the full SVD is computed using ZGESVJ */
2186 /* with accumulated Jacobi rotations. This provides numerically */
2187 /* more robust computation, at the cost of slightly increased run */
2188 /* time. Depending on the concrete implementation of BLAS and LAPACK */
2189 /* (i.e. how they behave in presence of extreme ill-conditioning) the */
2190 /* implementor may decide to remove this switch. */
2191 if (aaqq < sqrt(sfmin) && lsvec && rsvec) {
2198 for (p = 1; p <= i__1; ++p) {
2200 zlaset_("A", m, &c__1, &c_b1, &c_b1, &a[p * a_dim1 + 1], lda);
2207 /* Preconditioning using QR factorization with pivoting */
2210 /* Optional row permutation (Bjoerck row pivoting): */
2211 /* A result by Cox and Higham shows that the Bjoerck's */
2212 /* row pivoting combined with standard column pivoting */
2213 /* has similar effect as Powell-Reid complete pivoting. */
2214 /* The ell-infinity norms of A are made nonincreasing. */
2215 if (lsvec && rsvec && ! jracc) {
2221 for (p = 1; p <= i__1; ++p) {
2223 q = idamax_(&i__2, &rwork[*m + p], &c__1) + p - 1;
2224 iwork[iwoff + p] = q;
2226 temp1 = rwork[*m + p];
2227 rwork[*m + p] = rwork[*m + q];
2228 rwork[*m + q] = temp1;
2233 zlaswp_(n, &a[a_offset], lda, &c__1, &i__1, &iwork[iwoff + 1], &c__1);
2236 /* End of the preparation phase (scaling, optional sorting and */
2237 /* transposing, optional flushing of small columns). */
2239 /* Preconditioning */
2241 /* If the full SVD is needed, the right singular vectors are computed */
2242 /* from a matrix equation, and for that we need theoretical analysis */
2243 /* of the Businger-Golub pivoting. So we use ZGEQP3 as the first RR QRF. */
2244 /* In all other cases the first RR QRF can be chosen by other criteria */
2245 /* (eg speed by replacing global with restricted window pivoting, such */
2246 /* as in xGEQPX from TOMS # 782). Good results will be obtained using */
2247 /* xGEQPX with properly (!) chosen numerical parameters. */
2248 /* Any improvement of ZGEQP3 improves overall performance of ZGEJSV. */
2250 /* A * P1 = Q1 * [ R1^* 0]^*: */
2252 for (p = 1; p <= i__1; ++p) {
2257 zgeqp3_(m, n, &a[a_offset], lda, &iwork[1], &cwork[1], &cwork[*n + 1], &
2258 i__1, &rwork[1], &ierr);
2260 /* The upper triangular matrix R1 from the first QRF is inspected for */
2261 /* rank deficiency and possibilities for deflation, or possible */
2262 /* ill-conditioning. Depending on the user specified flag L2RANK, */
2263 /* the procedure explores possibilities to reduce the numerical */
2264 /* rank by inspecting the computed upper triangular factor. If */
2265 /* L2RANK or L2ABER are up, then ZGEJSV will compute the SVD of */
2266 /* A + dA, where ||dA|| <= f(M,N)*EPSLN. */
2270 /* Standard absolute error bound suffices. All sigma_i with */
2271 /* sigma_i < N*EPSLN*||A|| are flushed to zero. This is an */
2272 /* aggressive enforcement of lower numerical rank by introducing a */
2273 /* backward error of the order of N*EPSLN*||A||. */
2274 temp1 = sqrt((doublereal) (*n)) * epsln;
2276 for (p = 2; p <= i__1; ++p) {
2277 if (z_abs(&a[p + p * a_dim1]) >= temp1 * z_abs(&a[a_dim1 + 1])) {
2286 } else if (l2rank) {
2287 /* Sudden drop on the diagonal of R1 is used as the criterion for */
2288 /* close-to-rank-deficient. */
2289 temp1 = sqrt(sfmin);
2291 for (p = 2; p <= i__1; ++p) {
2292 if (z_abs(&a[p + p * a_dim1]) < epsln * z_abs(&a[p - 1 + (p - 1) *
2293 a_dim1]) || z_abs(&a[p + p * a_dim1]) < small || l2kill
2294 && z_abs(&a[p + p * a_dim1]) < temp1) {
2304 /* The goal is high relative accuracy. However, if the matrix */
2305 /* has high scaled condition number the relative accuracy is in */
2306 /* general not feasible. Later on, a condition number estimator */
2307 /* will be deployed to estimate the scaled condition number. */
2308 /* Here we just remove the underflowed part of the triangular */
2309 /* factor. This prevents the situation in which the code is */
2310 /* working hard to get the accuracy not warranted by the data. */
2311 temp1 = sqrt(sfmin);
2313 for (p = 2; p <= i__1; ++p) {
2314 if (z_abs(&a[p + p * a_dim1]) < small || l2kill && z_abs(&a[p + p
2315 * a_dim1]) < temp1) {
2330 for (p = 2; p <= i__1; ++p) {
2331 temp1 = z_abs(&a[p + p * a_dim1]) / sva[iwork[p]];
2332 maxprj = f2cmin(maxprj,temp1);
2335 /* Computing 2nd power */
2337 if (d__1 * d__1 >= 1. - (doublereal) (*n) * epsln) {
2350 zlacpy_("U", n, n, &a[a_offset], lda, &v[v_offset], ldv);
2352 for (p = 1; p <= i__1; ++p) {
2353 temp1 = sva[iwork[p]];
2355 zdscal_(&p, &d__1, &v[p * v_dim1 + 1], &c__1);
2359 zpocon_("U", n, &v[v_offset], ldv, &c_b141, &temp1, &
2360 cwork[*n + 1], &rwork[1], &ierr);
2362 zpocon_("U", n, &v[v_offset], ldv, &c_b141, &temp1, &
2363 cwork[1], &rwork[1], &ierr);
2367 zlacpy_("U", n, n, &a[a_offset], lda, &u[u_offset], ldu);
2369 for (p = 1; p <= i__1; ++p) {
2370 temp1 = sva[iwork[p]];
2372 zdscal_(&p, &d__1, &u[p * u_dim1 + 1], &c__1);
2375 zpocon_("U", n, &u[u_offset], ldu, &c_b141, &temp1, &cwork[*n
2376 + 1], &rwork[1], &ierr);
2378 zlacpy_("U", n, n, &a[a_offset], lda, &cwork[1], n)
2380 /* [] CALL ZLACPY( 'U', N, N, A, LDA, CWORK(N+1), N ) */
2381 /* Change: here index shifted by N to the left, CWORK(1:N) */
2382 /* not needed for SIGMA only computation */
2384 for (p = 1; p <= i__1; ++p) {
2385 temp1 = sva[iwork[p]];
2386 /* [] CALL ZDSCAL( p, ONE/TEMP1, CWORK(N+(p-1)*N+1), 1 ) */
2388 zdscal_(&p, &d__1, &cwork[(p - 1) * *n + 1], &c__1);
2391 /* [] CALL ZPOCON( 'U', N, CWORK(N+1), N, ONE, TEMP1, */
2392 /* [] $ CWORK(N+N*N+1), RWORK, IERR ) */
2393 zpocon_("U", n, &cwork[1], n, &c_b141, &temp1, &cwork[*n * *n
2394 + 1], &rwork[1], &ierr);
2398 sconda = 1. / sqrt(temp1);
2402 /* SCONDA is an estimate of SQRT(||(R^* * R)^(-1)||_1). */
2403 /* N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA */
2409 z_div(&z__1, &a[a_dim1 + 1], &a[nr + nr * a_dim1]);
2410 l2pert = l2pert && z_abs(&z__1) > sqrt(big1);
2411 /* If there is no violent scaling, artificial perturbation is not needed. */
2415 if (! (rsvec || lsvec)) {
2417 /* Singular Values only */
2421 i__1 = f2cmin(i__2,nr);
2422 for (p = 1; p <= i__1; ++p) {
2424 zcopy_(&i__2, &a[p + (p + 1) * a_dim1], lda, &a[p + 1 + p *
2427 zlacgv_(&i__2, &a[p + p * a_dim1], &c__1);
2431 i__1 = *n + *n * a_dim1;
2432 d_cnjg(&z__1, &a[*n + *n * a_dim1]);
2433 a[i__1].r = z__1.r, a[i__1].i = z__1.i;
2436 /* The following two DO-loops introduce small relative perturbation */
2437 /* into the strict upper triangle of the lower triangular matrix. */
2438 /* Small entries below the main diagonal are also changed. */
2439 /* This modification is useful if the computing environment does not */
2440 /* provide/allow FLUSH TO ZERO underflow, for it prevents many */
2441 /* annoying denormalized numbers in case of strongly scaled matrices. */
2442 /* The perturbation is structured so that it does not introduce any */
2443 /* new perturbation of the singular values, and it does not destroy */
2444 /* the job done by the preconditioner. */
2445 /* The licence for this perturbation is in the variable L2PERT, which */
2446 /* should be .FALSE. if FLUSH TO ZERO underflow is active. */
2451 /* XSC = SQRT(SMALL) */
2452 xsc = epsln / (doublereal) (*n);
2454 for (q = 1; q <= i__1; ++q) {
2455 d__1 = xsc * z_abs(&a[q + q * a_dim1]);
2456 z__1.r = d__1, z__1.i = 0.;
2457 ctemp.r = z__1.r, ctemp.i = z__1.i;
2459 for (p = 1; p <= i__2; ++p) {
2460 if (p > q && z_abs(&a[p + q * a_dim1]) <= temp1 || p <
2462 i__3 = p + q * a_dim1;
2463 a[i__3].r = ctemp.r, a[i__3].i = ctemp.i;
2465 /* $ A(p,q) = TEMP1 * ( A(p,q) / ABS(A(p,q)) ) */
2473 zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &a[(a_dim1 << 1) + 1]
2479 zgeqrf_(n, &nr, &a[a_offset], lda, &cwork[1], &cwork[*n + 1], &
2483 for (p = 1; p <= i__1; ++p) {
2485 zcopy_(&i__2, &a[p + (p + 1) * a_dim1], lda, &a[p + 1 + p *
2488 zlacgv_(&i__2, &a[p + p * a_dim1], &c__1);
2494 /* Row-cyclic Jacobi SVD algorithm with column pivoting */
2496 /* to drown denormals */
2498 /* XSC = SQRT(SMALL) */
2499 xsc = epsln / (doublereal) (*n);
2501 for (q = 1; q <= i__1; ++q) {
2502 d__1 = xsc * z_abs(&a[q + q * a_dim1]);
2503 z__1.r = d__1, z__1.i = 0.;
2504 ctemp.r = z__1.r, ctemp.i = z__1.i;
2506 for (p = 1; p <= i__2; ++p) {
2507 if (p > q && z_abs(&a[p + q * a_dim1]) <= temp1 || p < q)
2509 i__3 = p + q * a_dim1;
2510 a[i__3].r = ctemp.r, a[i__3].i = ctemp.i;
2512 /* $ A(p,q) = TEMP1 * ( A(p,q) / ABS(A(p,q)) ) */
2520 zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &a[(a_dim1 << 1) + 1],
2524 /* triangular matrix (plus perturbation which is ignored in */
2525 /* the part which destroys triangular form (confusing?!)) */
2527 zgesvj_("L", "N", "N", &nr, &nr, &a[a_offset], lda, &sva[1], n, &v[
2528 v_offset], ldv, &cwork[1], lwork, &rwork[1], lrwork, info);
2531 numrank = i_dnnt(&rwork[2]);
2534 } else if (rsvec && ! lsvec && ! jracc || jracc && ! lsvec && nr != *n) {
2536 /* -> Singular Values and Right Singular Vectors <- */
2541 for (p = 1; p <= i__1; ++p) {
2543 zcopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1], &
2546 zlacgv_(&i__2, &v[p + p * v_dim1], &c__1);
2551 zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1) + 1],
2554 zgesvj_("L", "U", "N", n, &nr, &v[v_offset], ldv, &sva[1], &nr, &
2555 a[a_offset], lda, &cwork[1], lwork, &rwork[1], lrwork,
2558 numrank = i_dnnt(&rwork[2]);
2561 /* accumulated product of Jacobi rotations, three are perfect ) */
2565 zlaset_("L", &i__1, &i__2, &c_b1, &c_b1, &a[a_dim1 + 2], lda);
2567 zgelqf_(&nr, n, &a[a_offset], lda, &cwork[1], &cwork[*n + 1], &
2569 zlacpy_("L", &nr, &nr, &a[a_offset], lda, &v[v_offset], ldv);
2572 zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1) + 1],
2574 i__1 = *lwork - (*n << 1);
2575 zgeqrf_(&nr, &nr, &v[v_offset], ldv, &cwork[*n + 1], &cwork[(*n <<
2576 1) + 1], &i__1, &ierr);
2578 for (p = 1; p <= i__1; ++p) {
2580 zcopy_(&i__2, &v[p + p * v_dim1], ldv, &v[p + p * v_dim1], &
2583 zlacgv_(&i__2, &v[p + p * v_dim1], &c__1);
2588 zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1) + 1],
2592 zgesvj_("L", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[1], &nr,
2593 &u[u_offset], ldu, &cwork[*n + 1], &i__1, &rwork[1],
2596 numrank = i_dnnt(&rwork[2]);
2599 zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &v[nr + 1 + v_dim1],
2602 zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &v[(nr + 1) * v_dim1 +
2606 zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &v[nr + 1 + (nr + 1)
2611 zunmlq_("L", "C", n, n, &nr, &a[a_offset], lda, &cwork[1], &v[
2612 v_offset], ldv, &cwork[*n + 1], &i__1, &ierr);
2615 /* DO 8991 p = 1, N */
2616 /* CALL ZCOPY( N, V(p,1), LDV, A(IWORK(p),1), LDA ) */
2618 /* CALL ZLACPY( 'All', N, N, A, LDA, V, LDV ) */
2619 zlapmr_(&c_false, n, n, &v[v_offset], ldv, &iwork[1]);
2622 zlacpy_("A", n, n, &v[v_offset], ldv, &u[u_offset], ldu);
2625 } else if (jracc && ! lsvec && nr == *n) {
2629 zlaset_("L", &i__1, &i__2, &c_b1, &c_b1, &a[a_dim1 + 2], lda);
2631 zgesvj_("U", "N", "V", n, n, &a[a_offset], lda, &sva[1], n, &v[
2632 v_offset], ldv, &cwork[1], lwork, &rwork[1], lrwork, info);
2634 numrank = i_dnnt(&rwork[2]);
2635 zlapmr_(&c_false, n, n, &v[v_offset], ldv, &iwork[1]);
2637 } else if (lsvec && ! rsvec) {
2640 /* Jacobi rotations in the Jacobi iterations. */
2642 for (p = 1; p <= i__1; ++p) {
2644 zcopy_(&i__2, &a[p + p * a_dim1], lda, &u[p + p * u_dim1], &c__1);
2646 zlacgv_(&i__2, &u[p + p * u_dim1], &c__1);
2651 zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &u[(u_dim1 << 1) + 1], ldu);
2653 i__1 = *lwork - (*n << 1);
2654 zgeqrf_(n, &nr, &u[u_offset], ldu, &cwork[*n + 1], &cwork[(*n << 1) +
2658 for (p = 1; p <= i__1; ++p) {
2660 zcopy_(&i__2, &u[p + (p + 1) * u_dim1], ldu, &u[p + 1 + p *
2663 zlacgv_(&i__2, &u[p + p * u_dim1], &c__1);
2668 zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &u[(u_dim1 << 1) + 1], ldu);
2671 zgesvj_("L", "U", "N", &nr, &nr, &u[u_offset], ldu, &sva[1], &nr, &a[
2672 a_offset], lda, &cwork[*n + 1], &i__1, &rwork[1], lrwork,
2675 numrank = i_dnnt(&rwork[2]);
2679 zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &u[nr + 1 + u_dim1], ldu);
2682 zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &u[(nr + 1) * u_dim1 +
2686 zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[nr + 1 + (nr + 1)
2692 zunmqr_("L", "N", m, &n1, n, &a[a_offset], lda, &cwork[1], &u[
2693 u_offset], ldu, &cwork[*n + 1], &i__1, &ierr);
2697 zlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[iwoff + 1], &
2702 for (p = 1; p <= i__1; ++p) {
2703 xsc = 1. / dznrm2_(m, &u[p * u_dim1 + 1], &c__1);
2704 zdscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
2709 zlacpy_("A", n, n, &u[u_offset], ldu, &v[v_offset], ldv);
2719 /* Second Preconditioning Step (QRF [with pivoting]) */
2720 /* Note that the composition of TRANSPOSE, QRF and TRANSPOSE is */
2721 /* equivalent to an LQF CALL. Since in many libraries the QRF */
2722 /* seems to be better optimized than the LQF, we do explicit */
2723 /* transpose and use the QRF. This is subject to changes in an */
2724 /* optimized implementation of ZGEJSV. */
2727 for (p = 1; p <= i__1; ++p) {
2729 zcopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1],
2732 zlacgv_(&i__2, &v[p + p * v_dim1], &c__1);
2736 /* denormals in the second QR factorization, where they are */
2737 /* as good as zeros. This is done to avoid painfully slow */
2738 /* computation with denormals. The relative size of the perturbation */
2739 /* is a parameter that can be changed by the implementer. */
2740 /* This perturbation device will be obsolete on machines with */
2741 /* properly implemented arithmetic. */
2742 /* To switch it off, set L2PERT=.FALSE. To remove it from the */
2743 /* code, remove the action under L2PERT=.TRUE., leave the ELSE part. */
2744 /* The following two loops should be blocked and fused with the */
2745 /* transposed copy above. */
2750 for (q = 1; q <= i__1; ++q) {
2751 d__1 = xsc * z_abs(&v[q + q * v_dim1]);
2752 z__1.r = d__1, z__1.i = 0.;
2753 ctemp.r = z__1.r, ctemp.i = z__1.i;
2755 for (p = 1; p <= i__2; ++p) {
2756 if (p > q && z_abs(&v[p + q * v_dim1]) <= temp1 ||
2758 i__3 = p + q * v_dim1;
2759 v[i__3].r = ctemp.r, v[i__3].i = ctemp.i;
2761 /* $ V(p,q) = TEMP1 * ( V(p,q) / ABS(V(p,q)) ) */
2763 i__3 = p + q * v_dim1;
2764 i__4 = p + q * v_dim1;
2765 z__1.r = -v[i__4].r, z__1.i = -v[i__4].i;
2766 v[i__3].r = z__1.r, v[i__3].i = z__1.i;
2775 zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1)
2779 /* Estimate the row scaled condition number of R1 */
2780 /* (If R1 is rectangular, N > NR, then the condition number */
2781 /* of the leading NR x NR submatrix is estimated.) */
2783 zlacpy_("L", &nr, &nr, &v[v_offset], ldv, &cwork[(*n << 1) +
2786 for (p = 1; p <= i__1; ++p) {
2788 temp1 = dznrm2_(&i__2, &cwork[(*n << 1) + (p - 1) * nr +
2792 zdscal_(&i__2, &d__1, &cwork[(*n << 1) + (p - 1) * nr + p]
2796 zpocon_("L", &nr, &cwork[(*n << 1) + 1], &nr, &c_b141, &temp1,
2797 &cwork[(*n << 1) + nr * nr + 1], &rwork[1], &ierr);
2798 condr1 = 1. / sqrt(temp1);
2799 /* R1 is OK for inverse <=> CONDR1 .LT. DBLE(N) */
2800 /* more conservative <=> CONDR1 .LT. SQRT(DBLE(N)) */
2802 cond_ok__ = sqrt(sqrt((doublereal) nr));
2803 /* [TP] COND_OK is a tuning parameter. */
2805 if (condr1 < cond_ok__) {
2806 /* implementation, this QRF should be implemented as the QRF */
2807 /* of a lower triangular matrix. */
2808 /* R1^* = Q2 * R2 */
2809 i__1 = *lwork - (*n << 1);
2810 zgeqrf_(n, &nr, &v[v_offset], ldv, &cwork[*n + 1], &cwork[
2811 (*n << 1) + 1], &i__1, &ierr);
2814 xsc = sqrt(small) / epsln;
2816 for (p = 2; p <= i__1; ++p) {
2818 for (q = 1; q <= i__2; ++q) {
2820 d__2 = z_abs(&v[p + p * v_dim1]), d__3 =
2821 z_abs(&v[q + q * v_dim1]);
2822 d__1 = xsc * f2cmin(d__2,d__3);
2823 z__1.r = d__1, z__1.i = 0.;
2824 ctemp.r = z__1.r, ctemp.i = z__1.i;
2825 if (z_abs(&v[q + p * v_dim1]) <= temp1) {
2826 i__3 = q + p * v_dim1;
2827 v[i__3].r = ctemp.r, v[i__3].i = ctemp.i;
2829 /* $ V(q,p) = TEMP1 * ( V(q,p) / ABS(V(q,p)) ) */
2837 zlacpy_("A", n, &nr, &v[v_offset], ldv, &cwork[(*n <<
2842 for (p = 1; p <= i__1; ++p) {
2844 zcopy_(&i__2, &v[p + (p + 1) * v_dim1], ldv, &v[p + 1
2845 + p * v_dim1], &c__1);
2847 zlacgv_(&i__2, &v[p + p * v_dim1], &c__1);
2850 i__1 = nr + nr * v_dim1;
2851 d_cnjg(&z__1, &v[nr + nr * v_dim1]);
2852 v[i__1].r = z__1.r, v[i__1].i = z__1.i;
2858 /* Note that windowed pivoting would be equally good */
2859 /* numerically, and more run-time efficient. So, in */
2860 /* an optimal implementation, the next call to ZGEQP3 */
2861 /* should be replaced with eg. CALL ZGEQPX (ACM TOMS #782) */
2862 /* with properly (carefully) chosen parameters. */
2864 /* R1^* * P2 = Q2 * R2 */
2866 for (p = 1; p <= i__1; ++p) {
2870 i__1 = *lwork - (*n << 1);
2871 zgeqp3_(n, &nr, &v[v_offset], ldv, &iwork[*n + 1], &cwork[
2872 *n + 1], &cwork[(*n << 1) + 1], &i__1, &rwork[1],
2874 /* * CALL ZGEQRF( N, NR, V, LDV, CWORK(N+1), CWORK(2*N+1), */
2875 /* * $ LWORK-2*N, IERR ) */
2879 for (p = 2; p <= i__1; ++p) {
2881 for (q = 1; q <= i__2; ++q) {
2883 d__2 = z_abs(&v[p + p * v_dim1]), d__3 =
2884 z_abs(&v[q + q * v_dim1]);
2885 d__1 = xsc * f2cmin(d__2,d__3);
2886 z__1.r = d__1, z__1.i = 0.;
2887 ctemp.r = z__1.r, ctemp.i = z__1.i;
2888 if (z_abs(&v[q + p * v_dim1]) <= temp1) {
2889 i__3 = q + p * v_dim1;
2890 v[i__3].r = ctemp.r, v[i__3].i = ctemp.i;
2892 /* $ V(q,p) = TEMP1 * ( V(q,p) / ABS(V(q,p)) ) */
2899 zlacpy_("A", n, &nr, &v[v_offset], ldv, &cwork[(*n << 1)
2905 for (p = 2; p <= i__1; ++p) {
2907 for (q = 1; q <= i__2; ++q) {
2909 d__2 = z_abs(&v[p + p * v_dim1]), d__3 =
2910 z_abs(&v[q + q * v_dim1]);
2911 d__1 = xsc * f2cmin(d__2,d__3);
2912 z__1.r = d__1, z__1.i = 0.;
2913 ctemp.r = z__1.r, ctemp.i = z__1.i;
2914 /* V(p,q) = - TEMP1*( V(q,p) / ABS(V(q,p)) ) */
2915 i__3 = p + q * v_dim1;
2916 z__1.r = -ctemp.r, z__1.i = -ctemp.i;
2917 v[i__3].r = z__1.r, v[i__3].i = z__1.i;
2925 zlaset_("L", &i__1, &i__2, &c_b1, &c_b1, &v[v_dim1 +
2928 /* Now, compute R2 = L3 * Q3, the LQ factorization. */
2929 i__1 = *lwork - (*n << 1) - *n * nr - nr;
2930 zgelqf_(&nr, &nr, &v[v_offset], ldv, &cwork[(*n << 1) + *
2931 n * nr + 1], &cwork[(*n << 1) + *n * nr + nr + 1],
2933 zlacpy_("L", &nr, &nr, &v[v_offset], ldv, &cwork[(*n << 1)
2934 + *n * nr + nr + 1], &nr);
2936 for (p = 1; p <= i__1; ++p) {
2937 temp1 = dznrm2_(&p, &cwork[(*n << 1) + *n * nr + nr +
2940 zdscal_(&p, &d__1, &cwork[(*n << 1) + *n * nr + nr +
2944 zpocon_("L", &nr, &cwork[(*n << 1) + *n * nr + nr + 1], &
2945 nr, &c_b141, &temp1, &cwork[(*n << 1) + *n * nr +
2946 nr + nr * nr + 1], &rwork[1], &ierr);
2947 condr2 = 1. / sqrt(temp1);
2950 if (condr2 >= cond_ok__) {
2951 /* (this overwrites the copy of R2, as it will not be */
2952 /* needed in this branch, but it does not overwrite the */
2953 /* Householder vectors of Q2.). */
2954 zlacpy_("U", &nr, &nr, &v[v_offset], ldv, &cwork[(*n
2956 /* WORK(2*N+N*NR+1:2*N+N*NR+N) */
2964 for (q = 2; q <= i__1; ++q) {
2965 i__2 = q + q * v_dim1;
2966 z__1.r = xsc * v[i__2].r, z__1.i = xsc * v[i__2].i;
2967 ctemp.r = z__1.r, ctemp.i = z__1.i;
2969 for (p = 1; p <= i__2; ++p) {
2970 /* V(p,q) = - TEMP1*( V(p,q) / ABS(V(p,q)) ) */
2971 i__3 = p + q * v_dim1;
2972 z__1.r = -ctemp.r, z__1.i = -ctemp.i;
2973 v[i__3].r = z__1.r, v[i__3].i = z__1.i;
2981 zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1)
2985 /* Second preconditioning finished; continue with Jacobi SVD */
2986 /* The input matrix is lower triangular. */
2988 /* Recover the right singular vectors as solution of a well */
2989 /* conditioned triangular matrix equation. */
2991 if (condr1 < cond_ok__) {
2993 i__1 = *lwork - (*n << 1) - *n * nr - nr;
2994 zgesvj_("L", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[
2995 1], &nr, &u[u_offset], ldu, &cwork[(*n << 1) + *n
2996 * nr + nr + 1], &i__1, &rwork[1], lrwork, info);
2998 numrank = i_dnnt(&rwork[2]);
3000 for (p = 1; p <= i__1; ++p) {
3001 zcopy_(&nr, &v[p * v_dim1 + 1], &c__1, &u[p * u_dim1
3003 zdscal_(&nr, &sva[p], &v[p * v_dim1 + 1], &c__1);
3008 /* :)) .. best case, R1 is inverted. The solution of this matrix */
3009 /* equation is Q2*V2 = the product of the Jacobi rotations */
3010 /* used in ZGESVJ, premultiplied with the orthogonal matrix */
3011 /* from the second QR factorization. */
3012 ztrsm_("L", "U", "N", "N", &nr, &nr, &c_b2, &a[
3013 a_offset], lda, &v[v_offset], ldv);
3015 /* is inverted to get the product of the Jacobi rotations */
3016 /* used in ZGESVJ. The Q-factor from the second QR */
3017 /* factorization is then built in explicitly. */
3018 ztrsm_("L", "U", "C", "N", &nr, &nr, &c_b2, &cwork[(*
3019 n << 1) + 1], n, &v[v_offset], ldv);
3022 zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &v[nr + 1
3025 zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &v[(nr + 1)
3026 * v_dim1 + 1], ldv);
3029 zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &v[nr +
3030 1 + (nr + 1) * v_dim1], ldv);
3032 i__1 = *lwork - (*n << 1) - *n * nr - nr;
3033 zunmqr_("L", "N", n, n, &nr, &cwork[(*n << 1) + 1], n,
3034 &cwork[*n + 1], &v[v_offset], ldv, &cwork[(*
3035 n << 1) + *n * nr + nr + 1], &i__1, &ierr);
3038 } else if (condr2 < cond_ok__) {
3040 /* The matrix R2 is inverted. The solution of the matrix equation */
3041 /* is Q3^* * V3 = the product of the Jacobi rotations (applied to */
3042 /* the lower triangular L3 from the LQ factorization of */
3043 /* R2=L3*Q3), pre-multiplied with the transposed Q3. */
3044 i__1 = *lwork - (*n << 1) - *n * nr - nr;
3045 zgesvj_("L", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[
3046 1], &nr, &u[u_offset], ldu, &cwork[(*n << 1) + *n
3047 * nr + nr + 1], &i__1, &rwork[1], lrwork, info);
3049 numrank = i_dnnt(&rwork[2]);
3051 for (p = 1; p <= i__1; ++p) {
3052 zcopy_(&nr, &v[p * v_dim1 + 1], &c__1, &u[p * u_dim1
3054 zdscal_(&nr, &sva[p], &u[p * u_dim1 + 1], &c__1);
3057 ztrsm_("L", "U", "N", "N", &nr, &nr, &c_b2, &cwork[(*n <<
3058 1) + 1], n, &u[u_offset], ldu);
3060 for (q = 1; q <= i__1; ++q) {
3062 for (p = 1; p <= i__2; ++p) {
3063 i__3 = (*n << 1) + *n * nr + nr + iwork[*n + p];
3064 i__4 = p + q * u_dim1;
3065 cwork[i__3].r = u[i__4].r, cwork[i__3].i = u[i__4]
3070 for (p = 1; p <= i__2; ++p) {
3071 i__3 = p + q * u_dim1;
3072 i__4 = (*n << 1) + *n * nr + nr + p;
3073 u[i__3].r = cwork[i__4].r, u[i__3].i = cwork[i__4]
3081 zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &v[nr + 1 +
3084 zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &v[(nr + 1) *
3088 zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &v[nr + 1 + (
3089 nr + 1) * v_dim1], ldv);
3091 i__1 = *lwork - (*n << 1) - *n * nr - nr;
3092 zunmqr_("L", "N", n, n, &nr, &cwork[(*n << 1) + 1], n, &
3093 cwork[*n + 1], &v[v_offset], ldv, &cwork[(*n << 1)
3094 + *n * nr + nr + 1], &i__1, &ierr);
3096 /* Last line of defense. */
3097 /* #:( This is a rather pathological case: no scaled condition */
3098 /* improvement after two pivoted QR factorizations. Other */
3099 /* possibility is that the rank revealing QR factorization */
3100 /* or the condition estimator has failed, or the COND_OK */
3101 /* is set very close to ONE (which is unnecessary). Normally, */
3102 /* this branch should never be executed, but in rare cases of */
3103 /* failure of the RRQR or condition estimator, the last line of */
3104 /* defense ensures that ZGEJSV completes the task. */
3105 /* Compute the full SVD of L3 using ZGESVJ with explicit */
3106 /* accumulation of Jacobi rotations. */
3107 i__1 = *lwork - (*n << 1) - *n * nr - nr;
3108 zgesvj_("L", "U", "V", &nr, &nr, &v[v_offset], ldv, &sva[
3109 1], &nr, &u[u_offset], ldu, &cwork[(*n << 1) + *n
3110 * nr + nr + 1], &i__1, &rwork[1], lrwork, info);
3112 numrank = i_dnnt(&rwork[2]);
3115 zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &v[nr + 1 +
3118 zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &v[(nr + 1) *
3122 zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &v[nr + 1 + (
3123 nr + 1) * v_dim1], ldv);
3125 i__1 = *lwork - (*n << 1) - *n * nr - nr;
3126 zunmqr_("L", "N", n, n, &nr, &cwork[(*n << 1) + 1], n, &
3127 cwork[*n + 1], &v[v_offset], ldv, &cwork[(*n << 1)
3128 + *n * nr + nr + 1], &i__1, &ierr);
3130 i__1 = *lwork - (*n << 1) - *n * nr - nr;
3131 zunmlq_("L", "C", &nr, &nr, &nr, &cwork[(*n << 1) + 1], n,
3132 &cwork[(*n << 1) + *n * nr + 1], &u[u_offset],
3133 ldu, &cwork[(*n << 1) + *n * nr + nr + 1], &i__1,
3136 for (q = 1; q <= i__1; ++q) {
3138 for (p = 1; p <= i__2; ++p) {
3139 i__3 = (*n << 1) + *n * nr + nr + iwork[*n + p];
3140 i__4 = p + q * u_dim1;
3141 cwork[i__3].r = u[i__4].r, cwork[i__3].i = u[i__4]
3146 for (p = 1; p <= i__2; ++p) {
3147 i__3 = p + q * u_dim1;
3148 i__4 = (*n << 1) + *n * nr + nr + p;
3149 u[i__3].r = cwork[i__4].r, u[i__3].i = cwork[i__4]
3158 /* Permute the rows of V using the (column) permutation from the */
3159 /* first QRF. Also, scale the columns to make them unit in */
3160 /* Euclidean norm. This applies to all cases. */
3162 temp1 = sqrt((doublereal) (*n)) * epsln;
3164 for (q = 1; q <= i__1; ++q) {
3166 for (p = 1; p <= i__2; ++p) {
3167 i__3 = (*n << 1) + *n * nr + nr + iwork[p];
3168 i__4 = p + q * v_dim1;
3169 cwork[i__3].r = v[i__4].r, cwork[i__3].i = v[i__4].i;
3173 for (p = 1; p <= i__2; ++p) {
3174 i__3 = p + q * v_dim1;
3175 i__4 = (*n << 1) + *n * nr + nr + p;
3176 v[i__3].r = cwork[i__4].r, v[i__3].i = cwork[i__4].i;
3179 xsc = 1. / dznrm2_(n, &v[q * v_dim1 + 1], &c__1);
3180 if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
3181 zdscal_(n, &xsc, &v[q * v_dim1 + 1], &c__1);
3185 /* At this moment, V contains the right singular vectors of A. */
3186 /* Next, assemble the left singular vector matrix U (M x N). */
3189 zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &u[nr + 1 + u_dim1]
3193 zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &u[(nr + 1) *
3197 zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[nr + 1 + (
3198 nr + 1) * u_dim1], ldu);
3202 /* The Q matrix from the first QRF is built into the left singular */
3203 /* matrix U. This applies to all cases. */
3206 zunmqr_("L", "N", m, &n1, n, &a[a_offset], lda, &cwork[1], &u[
3207 u_offset], ldu, &cwork[*n + 1], &i__1, &ierr);
3208 /* The columns of U are normalized. The cost is O(M*N) flops. */
3209 temp1 = sqrt((doublereal) (*m)) * epsln;
3211 for (p = 1; p <= i__1; ++p) {
3212 xsc = 1. / dznrm2_(m, &u[p * u_dim1 + 1], &c__1);
3213 if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
3214 zdscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
3219 /* If the initial QRF is computed with row pivoting, the left */
3220 /* singular vectors must be adjusted. */
3224 zlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[
3230 /* the second QRF is not needed */
3232 zlacpy_("U", n, n, &a[a_offset], lda, &cwork[*n + 1], n);
3236 for (p = 2; p <= i__1; ++p) {
3237 i__2 = *n + (p - 1) * *n + p;
3238 z__1.r = xsc * cwork[i__2].r, z__1.i = xsc * cwork[
3240 ctemp.r = z__1.r, ctemp.i = z__1.i;
3242 for (q = 1; q <= i__2; ++q) {
3243 /* CWORK(N+(q-1)*N+p)=-TEMP1 * ( CWORK(N+(p-1)*N+q) / */
3244 /* $ ABS(CWORK(N+(p-1)*N+q)) ) */
3245 i__3 = *n + (q - 1) * *n + p;
3246 z__1.r = -ctemp.r, z__1.i = -ctemp.i;
3247 cwork[i__3].r = z__1.r, cwork[i__3].i = z__1.i;
3255 zlaset_("L", &i__1, &i__2, &c_b1, &c_b1, &cwork[*n + 2],
3259 i__1 = *lwork - *n - *n * *n;
3260 zgesvj_("U", "U", "N", n, n, &cwork[*n + 1], n, &sva[1], n, &
3261 u[u_offset], ldu, &cwork[*n + *n * *n + 1], &i__1, &
3262 rwork[1], lrwork, info);
3265 numrank = i_dnnt(&rwork[2]);
3267 for (p = 1; p <= i__1; ++p) {
3268 zcopy_(n, &cwork[*n + (p - 1) * *n + 1], &c__1, &u[p *
3269 u_dim1 + 1], &c__1);
3270 zdscal_(n, &sva[p], &cwork[*n + (p - 1) * *n + 1], &c__1);
3274 ztrsm_("L", "U", "N", "N", n, n, &c_b2, &a[a_offset], lda, &
3277 for (p = 1; p <= i__1; ++p) {
3278 zcopy_(n, &cwork[*n + p], n, &v[iwork[p] + v_dim1], ldv);
3281 temp1 = sqrt((doublereal) (*n)) * epsln;
3283 for (p = 1; p <= i__1; ++p) {
3284 xsc = 1. / dznrm2_(n, &v[p * v_dim1 + 1], &c__1);
3285 if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
3286 zdscal_(n, &xsc, &v[p * v_dim1 + 1], &c__1);
3291 /* Assemble the left singular vector matrix U (M x N). */
3295 zlaset_("A", &i__1, n, &c_b1, &c_b1, &u[*n + 1 + u_dim1],
3299 zlaset_("A", n, &i__1, &c_b1, &c_b1, &u[(*n + 1) *
3303 zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[*n + 1 + (
3304 *n + 1) * u_dim1], ldu);
3308 zunmqr_("L", "N", m, &n1, n, &a[a_offset], lda, &cwork[1], &u[
3309 u_offset], ldu, &cwork[*n + 1], &i__1, &ierr);
3310 temp1 = sqrt((doublereal) (*m)) * epsln;
3312 for (p = 1; p <= i__1; ++p) {
3313 xsc = 1. / dznrm2_(m, &u[p * u_dim1 + 1], &c__1);
3314 if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
3315 zdscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
3322 zlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[
3328 /* end of the >> almost orthogonal case << in the full SVD */
3332 /* This branch deploys a preconditioned Jacobi SVD with explicitly */
3333 /* accumulated rotations. It is included as optional, mainly for */
3334 /* experimental purposes. It does perform well, and can also be used. */
3335 /* In this implementation, this branch will be automatically activated */
3336 /* if the condition number sigma_max(A) / sigma_min(A) is predicted */
3337 /* to be greater than the overflow threshold. This is because the */
3338 /* a posteriori computation of the singular vectors assumes robust */
3339 /* implementation of BLAS and some LAPACK procedures, capable of working */
3340 /* in presence of extreme values, e.g. when the singular values spread from */
3341 /* the underflow to the overflow threshold. */
3344 for (p = 1; p <= i__1; ++p) {
3346 zcopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1], &
3349 zlacgv_(&i__2, &v[p + p * v_dim1], &c__1);
3354 xsc = sqrt(small / epsln);
3356 for (q = 1; q <= i__1; ++q) {
3357 d__1 = xsc * z_abs(&v[q + q * v_dim1]);
3358 z__1.r = d__1, z__1.i = 0.;
3359 ctemp.r = z__1.r, ctemp.i = z__1.i;
3361 for (p = 1; p <= i__2; ++p) {
3362 if (p > q && z_abs(&v[p + q * v_dim1]) <= temp1 || p <
3364 i__3 = p + q * v_dim1;
3365 v[i__3].r = ctemp.r, v[i__3].i = ctemp.i;
3367 /* $ V(p,q) = TEMP1 * ( V(p,q) / ABS(V(p,q)) ) */
3369 i__3 = p + q * v_dim1;
3370 i__4 = p + q * v_dim1;
3371 z__1.r = -v[i__4].r, z__1.i = -v[i__4].i;
3372 v[i__3].r = z__1.r, v[i__3].i = z__1.i;
3381 zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1) + 1]
3384 i__1 = *lwork - (*n << 1);
3385 zgeqrf_(n, &nr, &v[v_offset], ldv, &cwork[*n + 1], &cwork[(*n <<
3386 1) + 1], &i__1, &ierr);
3387 zlacpy_("L", n, &nr, &v[v_offset], ldv, &cwork[(*n << 1) + 1], n);
3390 for (p = 1; p <= i__1; ++p) {
3392 zcopy_(&i__2, &v[p + p * v_dim1], ldv, &u[p + p * u_dim1], &
3395 zlacgv_(&i__2, &u[p + p * u_dim1], &c__1);
3399 xsc = sqrt(small / epsln);
3401 for (q = 2; q <= i__1; ++q) {
3403 for (p = 1; p <= i__2; ++p) {
3405 d__2 = z_abs(&u[p + p * u_dim1]), d__3 = z_abs(&u[q +
3407 d__1 = xsc * f2cmin(d__2,d__3);
3408 z__1.r = d__1, z__1.i = 0.;
3409 ctemp.r = z__1.r, ctemp.i = z__1.i;
3410 /* U(p,q) = - TEMP1 * ( U(q,p) / ABS(U(q,p)) ) */
3411 i__3 = p + q * u_dim1;
3412 z__1.r = -ctemp.r, z__1.i = -ctemp.i;
3413 u[i__3].r = z__1.r, u[i__3].i = z__1.i;
3421 zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &u[(u_dim1 << 1) + 1]
3424 i__1 = *lwork - (*n << 1) - *n * nr;
3425 zgesvj_("L", "U", "V", &nr, &nr, &u[u_offset], ldu, &sva[1], n, &
3426 v[v_offset], ldv, &cwork[(*n << 1) + *n * nr + 1], &i__1,
3427 &rwork[1], lrwork, info);
3429 numrank = i_dnnt(&rwork[2]);
3432 zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &v[nr + 1 + v_dim1],
3435 zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &v[(nr + 1) * v_dim1 +
3439 zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &v[nr + 1 + (nr + 1)
3442 i__1 = *lwork - (*n << 1) - *n * nr - nr;
3443 zunmqr_("L", "N", n, n, &nr, &cwork[(*n << 1) + 1], n, &cwork[*n
3444 + 1], &v[v_offset], ldv, &cwork[(*n << 1) + *n * nr + nr
3445 + 1], &i__1, &ierr);
3447 /* Permute the rows of V using the (column) permutation from the */
3448 /* first QRF. Also, scale the columns to make them unit in */
3449 /* Euclidean norm. This applies to all cases. */
3451 temp1 = sqrt((doublereal) (*n)) * epsln;
3453 for (q = 1; q <= i__1; ++q) {
3455 for (p = 1; p <= i__2; ++p) {
3456 i__3 = (*n << 1) + *n * nr + nr + iwork[p];
3457 i__4 = p + q * v_dim1;
3458 cwork[i__3].r = v[i__4].r, cwork[i__3].i = v[i__4].i;
3462 for (p = 1; p <= i__2; ++p) {
3463 i__3 = p + q * v_dim1;
3464 i__4 = (*n << 1) + *n * nr + nr + p;
3465 v[i__3].r = cwork[i__4].r, v[i__3].i = cwork[i__4].i;
3468 xsc = 1. / dznrm2_(n, &v[q * v_dim1 + 1], &c__1);
3469 if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
3470 zdscal_(n, &xsc, &v[q * v_dim1 + 1], &c__1);
3475 /* At this moment, V contains the right singular vectors of A. */
3476 /* Next, assemble the left singular vector matrix U (M x N). */
3480 zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &u[nr + 1 + u_dim1],
3484 zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &u[(nr + 1) *
3488 zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[nr + 1 + (nr
3489 + 1) * u_dim1], ldu);
3494 zunmqr_("L", "N", m, &n1, n, &a[a_offset], lda, &cwork[1], &u[
3495 u_offset], ldu, &cwork[*n + 1], &i__1, &ierr);
3499 zlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[iwoff +
3507 for (p = 1; p <= i__1; ++p) {
3508 zswap_(n, &u[p * u_dim1 + 1], &c__1, &v[p * v_dim1 + 1], &
3515 /* end of the full SVD */
3517 /* Undo scaling, if necessary (and possible) */
3519 if (uscal2 <= big / sva[1] * uscal1) {
3520 dlascl_("G", &c__0, &c__0, &uscal1, &uscal2, &nr, &c__1, &sva[1], n, &
3528 for (p = nr + 1; p <= i__1; ++p) {
3534 rwork[1] = uscal2 * scalem;
3539 if (lsvec && rsvec) {