14 typedef long long BLASLONG;
15 typedef unsigned long long BLASULONG;
17 typedef long BLASLONG;
18 typedef unsigned long BLASULONG;
22 typedef BLASLONG blasint;
24 #define blasabs(x) llabs(x)
26 #define blasabs(x) labs(x)
30 #define blasabs(x) abs(x)
33 typedef blasint integer;
35 typedef unsigned int uinteger;
36 typedef char *address;
37 typedef short int shortint;
39 typedef double doublereal;
40 typedef struct { real r, i; } complex;
41 typedef struct { doublereal r, i; } doublecomplex;
43 static inline _Fcomplex Cf(complex *z) {_Fcomplex zz={z->r , z->i}; return zz;}
44 static inline _Dcomplex Cd(doublecomplex *z) {_Dcomplex zz={z->r , z->i};return zz;}
45 static inline _Fcomplex * _pCf(complex *z) {return (_Fcomplex*)z;}
46 static inline _Dcomplex * _pCd(doublecomplex *z) {return (_Dcomplex*)z;}
48 static inline _Complex float Cf(complex *z) {return z->r + z->i*_Complex_I;}
49 static inline _Complex double Cd(doublecomplex *z) {return z->r + z->i*_Complex_I;}
50 static inline _Complex float * _pCf(complex *z) {return (_Complex float*)z;}
51 static inline _Complex double * _pCd(doublecomplex *z) {return (_Complex double*)z;}
53 #define pCf(z) (*_pCf(z))
54 #define pCd(z) (*_pCd(z))
56 typedef short int shortlogical;
57 typedef char logical1;
58 typedef char integer1;
63 /* Extern is for use with -E */
74 /*external read, write*/
83 /*internal read, write*/
113 /*rewind, backspace, endfile*/
125 ftnint *inex; /*parameters in standard's order*/
151 union Multitype { /* for multiple entry points */
162 typedef union Multitype Multitype;
164 struct Vardesc { /* for Namelist */
170 typedef struct Vardesc Vardesc;
177 typedef struct Namelist Namelist;
179 #define abs(x) ((x) >= 0 ? (x) : -(x))
180 #define dabs(x) (fabs(x))
181 #define f2cmin(a,b) ((a) <= (b) ? (a) : (b))
182 #define f2cmax(a,b) ((a) >= (b) ? (a) : (b))
183 #define dmin(a,b) (f2cmin(a,b))
184 #define dmax(a,b) (f2cmax(a,b))
185 #define bit_test(a,b) ((a) >> (b) & 1)
186 #define bit_clear(a,b) ((a) & ~((uinteger)1 << (b)))
187 #define bit_set(a,b) ((a) | ((uinteger)1 << (b)))
189 #define abort_() { sig_die("Fortran abort routine called", 1); }
190 #define c_abs(z) (cabsf(Cf(z)))
191 #define c_cos(R,Z) { pCf(R)=ccos(Cf(Z)); }
/* Complex division c = a / b (single precision, Fortran c_div semantics).
   Fixes two defects in the original:
     - the quotient was computed component-wise (re/re, im/im), which is
       not complex division; the standard
       (ar*br + ai*bi)/(br^2 + bi^2), (ai*br - ar*bi)/(br^2 + bi^2)
       form is used instead, matching f2c's libf77 c_div.
     - the result was assigned through a by-value temporary
       (Cf(c)._Val[0] = ... assigns to a non-lvalue); the store now goes
       through _pCf(c).
   Numerators are computed before any store so that c may alias a or b.
   Division by zero (b == 0) is the caller's responsibility, as in libf77. */
#define c_div(c, a, b) { \
	float cdiv_br_ = Cf(b)._Val[0], cdiv_bi_ = Cf(b)._Val[1]; \
	float cdiv_d_  = cdiv_br_*cdiv_br_ + cdiv_bi_*cdiv_bi_; \
	float cdiv_r_  = (Cf(a)._Val[0]*cdiv_br_ + Cf(a)._Val[1]*cdiv_bi_)/cdiv_d_; \
	float cdiv_i_  = (Cf(a)._Val[1]*cdiv_br_ - Cf(a)._Val[0]*cdiv_bi_)/cdiv_d_; \
	_pCf(c)->_Val[0] = cdiv_r_; _pCf(c)->_Val[1] = cdiv_i_; }
/* Complex division c = a / b (double precision, Fortran z_div semantics).
   Fixes three defects in the original:
     - "df(b)" was a typo for "Cd(b)" (undefined identifier).
     - the quotient was computed component-wise (re/re, im/im), which is
       not complex division; the standard
       (ar*br + ai*bi)/(br^2 + bi^2), (ai*br - ar*bi)/(br^2 + bi^2)
       form is used instead, matching f2c's libf77 z_div.
     - the result was assigned through a by-value temporary
       (Cd(c)._Val[0] = ... assigns to a non-lvalue); the store now goes
       through _pCd(c).
   Numerators are computed before any store so that c may alias a or b.
   Division by zero (b == 0) is the caller's responsibility, as in libf77. */
#define z_div(c, a, b) { \
	double zdiv_br_ = Cd(b)._Val[0], zdiv_bi_ = Cd(b)._Val[1]; \
	double zdiv_d_  = zdiv_br_*zdiv_br_ + zdiv_bi_*zdiv_bi_; \
	double zdiv_r_  = (Cd(a)._Val[0]*zdiv_br_ + Cd(a)._Val[1]*zdiv_bi_)/zdiv_d_; \
	double zdiv_i_  = (Cd(a)._Val[1]*zdiv_br_ - Cd(a)._Val[0]*zdiv_bi_)/zdiv_d_; \
	_pCd(c)->_Val[0] = zdiv_r_; _pCd(c)->_Val[1] = zdiv_i_; }
196 #define c_div(c, a, b) {pCf(c) = Cf(a)/Cf(b);}
197 #define z_div(c, a, b) {pCd(c) = Cd(a)/Cd(b);}
199 #define c_exp(R, Z) {pCf(R) = cexpf(Cf(Z));}
200 #define c_log(R, Z) {pCf(R) = clogf(Cf(Z));}
201 #define c_sin(R, Z) {pCf(R) = csinf(Cf(Z));}
202 //#define c_sqrt(R, Z) {*(R) = csqrtf(Cf(Z));}
203 #define c_sqrt(R, Z) {pCf(R) = csqrtf(Cf(Z));}
204 #define d_abs(x) (fabs(*(x)))
205 #define d_acos(x) (acos(*(x)))
206 #define d_asin(x) (asin(*(x)))
207 #define d_atan(x) (atan(*(x)))
208 #define d_atn2(x, y) (atan2(*(x),*(y)))
209 #define d_cnjg(R, Z) { pCd(R) = conj(Cd(Z)); }
210 #define r_cnjg(R, Z) { pCf(R) = conjf(Cf(Z)); }
211 #define d_cos(x) (cos(*(x)))
212 #define d_cosh(x) (cosh(*(x)))
213 #define d_dim(__a, __b) ( *(__a) > *(__b) ? *(__a) - *(__b) : 0.0 )
214 #define d_exp(x) (exp(*(x)))
215 #define d_imag(z) (cimag(Cd(z)))
216 #define r_imag(z) (cimagf(Cf(z)))
217 #define d_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
218 #define r_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
219 #define d_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
220 #define r_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
221 #define d_log(x) (log(*(x)))
222 #define d_mod(x, y) (fmod(*(x), *(y)))
223 #define u_nint(__x) ((__x)>=0 ? floor((__x) + .5) : -floor(.5 - (__x)))
224 #define d_nint(x) u_nint(*(x))
225 #define u_sign(__a,__b) ((__b) >= 0 ? ((__a) >= 0 ? (__a) : -(__a)) : -((__a) >= 0 ? (__a) : -(__a)))
226 #define d_sign(a,b) u_sign(*(a),*(b))
227 #define r_sign(a,b) u_sign(*(a),*(b))
228 #define d_sin(x) (sin(*(x)))
229 #define d_sinh(x) (sinh(*(x)))
230 #define d_sqrt(x) (sqrt(*(x)))
231 #define d_tan(x) (tan(*(x)))
232 #define d_tanh(x) (tanh(*(x)))
233 #define i_abs(x) abs(*(x))
234 #define i_dnnt(x) ((integer)u_nint(*(x)))
235 #define i_len(s, n) (n)
236 #define i_nint(x) ((integer)u_nint(*(x)))
237 #define i_sign(a,b) ((integer)u_sign((integer)*(a),(integer)*(b)))
238 #define pow_dd(ap, bp) ( pow(*(ap), *(bp)))
239 #define pow_si(B,E) spow_ui(*(B),*(E))
240 #define pow_ri(B,E) spow_ui(*(B),*(E))
241 #define pow_di(B,E) dpow_ui(*(B),*(E))
242 #define pow_zi(p, a, b) {pCd(p) = zpow_ui(Cd(a), *(b));}
243 #define pow_ci(p, a, b) {pCf(p) = cpow_ui(Cf(a), *(b));}
244 #define pow_zz(R,A,B) {pCd(R) = cpow(Cd(A),*(B));}
245 #define s_cat(lpp, rpp, rnp, np, llp) { ftnlen i, nc, ll; char *f__rp, *lp; ll = (llp); lp = (lpp); for(i=0; i < (int)*(np); ++i) { nc = ll; if((rnp)[i] < nc) nc = (rnp)[i]; ll -= nc; f__rp = (rpp)[i]; while(--nc >= 0) *lp++ = *(f__rp)++; } while(--ll >= 0) *lp++ = ' '; }
246 #define s_cmp(a,b,c,d) ((integer)strncmp((a),(b),f2cmin((c),(d))))
/* Fortran character assignment A = B: copies at most f2cmin(C,D) chars,
   stopping early at a NUL byte in B.
   NOTE(review): f2c's libf77 s_copy blank-pads the destination up to its
   declared length when the source is shorter; this macro does not pad --
   confirm that no caller relies on trailing-blank semantics. */
247 #define s_copy(A,B,C,D) { int __i,__m; for (__i=0, __m=f2cmin((C),(D)); __i<__m && (B)[__i] != 0; ++__i) (A)[__i] = (B)[__i]; }
248 #define sig_die(s, kill) { exit(1); }
249 #define s_stop(s, n) {exit(0);}
250 static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n";
251 #define z_abs(z) (cabs(Cd(z)))
252 #define z_exp(R, Z) {pCd(R) = cexp(Cd(Z));}
253 #define z_sqrt(R, Z) {pCd(R) = csqrt(Cd(Z));}
254 #define myexit_() break;
255 #define mycycle() continue;
/* NOTE(review): the surrounding braces make these macros expand to
   compound statements, so they cannot appear inside an expression
   (e.g. `x = myceiling(y);` would not compile); they are usable only
   where a bare statement is expected -- verify against call sites. */
256 #define myceiling(w) {ceil(w)}
257 #define myhuge(w) {HUGE_VAL}
258 //#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);}
259 #define mymaxloc(w,s,e,n) {dmaxloc_(w,*(s),*(e),n)}
261 /* procedure parameter types for -A and -C++ */
263 #define F2C_proc_par_types 1
265 typedef logical (*L_fp)(...);
267 typedef logical (*L_fp)();
270 static float spow_ui(float x, integer n) {
271 float pow=1.0; unsigned long int u;
273 if(n < 0) n = -n, x = 1/x;
282 static double dpow_ui(double x, integer n) {
283 double pow=1.0; unsigned long int u;
285 if(n < 0) n = -n, x = 1/x;
295 static _Fcomplex cpow_ui(complex x, integer n) {
296 complex pow={1.0,0.0}; unsigned long int u;
298 if(n < 0) n = -n, x.r = 1/x.r, x.i=1/x.i;
300 if(u & 01) pow.r *= x.r, pow.i *= x.i;
301 if(u >>= 1) x.r *= x.r, x.i *= x.i;
305 _Fcomplex p={pow.r, pow.i};
309 static _Complex float cpow_ui(_Complex float x, integer n) {
310 _Complex float pow=1.0; unsigned long int u;
312 if(n < 0) n = -n, x = 1/x;
323 static _Dcomplex zpow_ui(_Dcomplex x, integer n) {
324 _Dcomplex pow={1.0,0.0}; unsigned long int u;
326 if(n < 0) n = -n, x._Val[0] = 1/x._Val[0], x._Val[1] =1/x._Val[1];
328 if(u & 01) pow._Val[0] *= x._Val[0], pow._Val[1] *= x._Val[1];
329 if(u >>= 1) x._Val[0] *= x._Val[0], x._Val[1] *= x._Val[1];
333 _Dcomplex p = {pow._Val[0], pow._Val[1]};
337 static _Complex double zpow_ui(_Complex double x, integer n) {
338 _Complex double pow=1.0; unsigned long int u;
340 if(n < 0) n = -n, x = 1/x;
350 static integer pow_ii(integer x, integer n) {
351 integer pow; unsigned long int u;
353 if (n == 0 || x == 1) pow = 1;
354 else if (x != -1) pow = x == 0 ? 1/x : 0;
357 if ((n > 0) || !(n == 0 || x == 1 || x != -1)) {
367 static integer dmaxloc_(double *w, integer s, integer e, integer *n)
369 double m; integer i, mi;
370 for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
371 if (w[i-1]>m) mi=i ,m=w[i-1];
374 static integer smaxloc_(float *w, integer s, integer e, integer *n)
376 float m; integer i, mi;
377 for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
378 if (w[i-1]>m) mi=i ,m=w[i-1];
381 static inline void cdotc_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
382 integer n = *n_, incx = *incx_, incy = *incy_, i;
384 _Fcomplex zdotc = {0.0, 0.0};
385 if (incx == 1 && incy == 1) {
386 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
387 zdotc._Val[0] += conjf(Cf(&x[i]))._Val[0] * Cf(&y[i])._Val[0];
388 zdotc._Val[1] += conjf(Cf(&x[i]))._Val[1] * Cf(&y[i])._Val[1];
391 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
392 zdotc._Val[0] += conjf(Cf(&x[i*incx]))._Val[0] * Cf(&y[i*incy])._Val[0];
393 zdotc._Val[1] += conjf(Cf(&x[i*incx]))._Val[1] * Cf(&y[i*incy])._Val[1];
399 _Complex float zdotc = 0.0;
400 if (incx == 1 && incy == 1) {
401 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
402 zdotc += conjf(Cf(&x[i])) * Cf(&y[i]);
405 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
406 zdotc += conjf(Cf(&x[i*incx])) * Cf(&y[i*incy]);
412 static inline void zdotc_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
413 integer n = *n_, incx = *incx_, incy = *incy_, i;
415 _Dcomplex zdotc = {0.0, 0.0};
416 if (incx == 1 && incy == 1) {
417 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
418 zdotc._Val[0] += conj(Cd(&x[i]))._Val[0] * Cd(&y[i])._Val[0];
419 zdotc._Val[1] += conj(Cd(&x[i]))._Val[1] * Cd(&y[i])._Val[1];
422 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
423 zdotc._Val[0] += conj(Cd(&x[i*incx]))._Val[0] * Cd(&y[i*incy])._Val[0];
424 zdotc._Val[1] += conj(Cd(&x[i*incx]))._Val[1] * Cd(&y[i*incy])._Val[1];
430 _Complex double zdotc = 0.0;
431 if (incx == 1 && incy == 1) {
432 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
433 zdotc += conj(Cd(&x[i])) * Cd(&y[i]);
436 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
437 zdotc += conj(Cd(&x[i*incx])) * Cd(&y[i*incy]);
443 static inline void cdotu_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
444 integer n = *n_, incx = *incx_, incy = *incy_, i;
446 _Fcomplex zdotc = {0.0, 0.0};
447 if (incx == 1 && incy == 1) {
448 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
449 zdotc._Val[0] += Cf(&x[i])._Val[0] * Cf(&y[i])._Val[0];
450 zdotc._Val[1] += Cf(&x[i])._Val[1] * Cf(&y[i])._Val[1];
453 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
454 zdotc._Val[0] += Cf(&x[i*incx])._Val[0] * Cf(&y[i*incy])._Val[0];
455 zdotc._Val[1] += Cf(&x[i*incx])._Val[1] * Cf(&y[i*incy])._Val[1];
461 _Complex float zdotc = 0.0;
462 if (incx == 1 && incy == 1) {
463 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
464 zdotc += Cf(&x[i]) * Cf(&y[i]);
467 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
468 zdotc += Cf(&x[i*incx]) * Cf(&y[i*incy]);
474 static inline void zdotu_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
475 integer n = *n_, incx = *incx_, incy = *incy_, i;
477 _Dcomplex zdotc = {0.0, 0.0};
478 if (incx == 1 && incy == 1) {
479 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
480 zdotc._Val[0] += Cd(&x[i])._Val[0] * Cd(&y[i])._Val[0];
481 zdotc._Val[1] += Cd(&x[i])._Val[1] * Cd(&y[i])._Val[1];
484 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
485 zdotc._Val[0] += Cd(&x[i*incx])._Val[0] * Cd(&y[i*incy])._Val[0];
486 zdotc._Val[1] += Cd(&x[i*incx])._Val[1] * Cd(&y[i*incy])._Val[1];
492 _Complex double zdotc = 0.0;
493 if (incx == 1 && incy == 1) {
494 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
495 zdotc += Cd(&x[i]) * Cd(&y[i]);
498 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
499 zdotc += Cd(&x[i*incx]) * Cd(&y[i*incy]);
505 /* -- translated by f2c (version 20000121).
506 You must link the resulting object file with the libraries:
507 -lf2c -lm (in that order)
513 /* Table of constant values */
515 static doublecomplex c_b1 = {0.,0.};
516 static doublecomplex c_b2 = {1.,0.};
517 static integer c_n1 = -1;
518 static integer c__1 = 1;
519 static doublereal c_b74 = 0.;
520 static integer c__0 = 0;
521 static doublereal c_b87 = 1.;
522 static logical c_false = FALSE_;
524 /* > \brief <b> ZGESVDQ computes the singular value decomposition (SVD) with a QR-Preconditioned QR SVD Method
525 for GE matrices</b> */
527 /* =========== DOCUMENTATION =========== */
529 /* Online html documentation available at */
530 /* http://www.netlib.org/lapack/explore-html/ */
533 /* > Download ZGESVDQ + dependencies */
534 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/zgesvdq
537 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/zgesvdq
540 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/zgesvdq
548 /* SUBROUTINE ZGESVDQ( JOBA, JOBP, JOBR, JOBU, JOBV, M, N, A, LDA, */
549 /* S, U, LDU, V, LDV, NUMRANK, IWORK, LIWORK, */
550 /* CWORK, LCWORK, RWORK, LRWORK, INFO ) */
553 /* CHARACTER JOBA, JOBP, JOBR, JOBU, JOBV */
554 /* INTEGER M, N, LDA, LDU, LDV, NUMRANK, LIWORK, LCWORK, LRWORK, */
556 /* COMPLEX*16 A( LDA, * ), U( LDU, * ), V( LDV, * ), CWORK( * ) */
557 /* DOUBLE PRECISION S( * ), RWORK( * ) */
558 /* INTEGER IWORK( * ) */
561 /* > \par Purpose: */
566 /* ZGESVDQ computes the singular value decomposition (SVD) of a complex */
567 /* > M-by-N matrix A, where M >= N. The SVD of A is written as */
568 /* > [++] [xx] [x0] [xx] */
569 /* > A = U * SIGMA * V^*, [++] = [xx] * [ox] * [xx] */
571 /* > where SIGMA is an N-by-N diagonal matrix, U is an M-by-N orthonormal */
572 /* > matrix, and V is an N-by-N unitary matrix. The diagonal elements */
573 /* > of SIGMA are the singular values of A. The columns of U and V are the */
574 /* > left and the right singular vectors of A, respectively. */
580 /* > \param[in] JOBA */
582 /* > JOBA is CHARACTER*1 */
583 /* > Specifies the level of accuracy in the computed SVD */
584 /* > = 'A' The requested accuracy corresponds to having the backward */
585 /* > error bounded by || delta A ||_F <= f(m,n) * EPS * || A ||_F, */
586 /* > where EPS = DLAMCH('Epsilon'). This authorises ZGESVDQ to */
587 /* > truncate the computed triangular factor in a rank revealing */
588 /* > QR factorization whenever the truncated part is below the */
589 /* > threshold of the order of EPS * ||A||_F. This is aggressive */
590 /* > truncation level. */
591 /* > = 'M' Similarly as with 'A', but the truncation is more gentle: it */
592 /* > is allowed only when there is a drop on the diagonal of the */
593 /* > triangular factor in the QR factorization. This is medium */
594 /* > truncation level. */
595 /* > = 'H' High accuracy requested. No numerical rank determination based */
596 /* > on the rank revealing QR factorization is attempted. */
597 /* > = 'E' Same as 'H', and in addition the condition number of column */
598 /* > scaled A is estimated and returned in RWORK(1). */
599 /* > N^(-1/4)*RWORK(1) <= ||pinv(A_scaled)||_2 <= N^(1/4)*RWORK(1) */
602 /* > \param[in] JOBP */
604 /* > JOBP is CHARACTER*1 */
605 /* > = 'P' The rows of A are ordered in decreasing order with respect to */
606 /* > ||A(i,:)||_\infty. This enhances numerical accuracy at the cost */
607 /* > of extra data movement. Recommended for numerical robustness. */
608 /* > = 'N' No row pivoting. */
611 /* > \param[in] JOBR */
613 /* > JOBR is CHARACTER*1 */
614 /* > = 'T' After the initial pivoted QR factorization, ZGESVD is applied to */
615 /* > the adjoint R**H of the computed triangular factor R. This involves */
616 /* > some extra data movement (matrix transpositions). Useful for */
617 /* > experiments, research and development. */
618 /* > = 'N' The triangular factor R is given as input to CGESVD. This may be */
619 /* > preferred as it involves less data movement. */
622 /* > \param[in] JOBU */
624 /* > JOBU is CHARACTER*1 */
625 /* > = 'A' All M left singular vectors are computed and returned in the */
626 /* > matrix U. See the description of U. */
627 /* > = 'S' or 'U' N = f2cmin(M,N) left singular vectors are computed and returned */
628 /* > in the matrix U. See the description of U. */
629 /* > = 'R' Numerical rank NUMRANK is determined and only NUMRANK left singular */
630 /* > vectors are computed and returned in the matrix U. */
631 /* > = 'F' The N left singular vectors are returned in factored form as the */
632 /* > product of the Q factor from the initial QR factorization and the */
633 /* > N left singular vectors of (R**H , 0)**H. If row pivoting is used, */
634 /* > then the necessary information on the row pivoting is stored in */
635 /* > IWORK(N+1:N+M-1). */
636 /* > = 'N' The left singular vectors are not computed. */
639 /* > \param[in] JOBV */
641 /* > JOBV is CHARACTER*1 */
642 /* > = 'A', 'V' All N right singular vectors are computed and returned in */
643 /* > the matrix V. */
644 /* > = 'R' Numerical rank NUMRANK is determined and only NUMRANK right singular */
645 /* > vectors are computed and returned in the matrix V. This option is */
646 /* > allowed only if JOBU = 'R' or JOBU = 'N'; otherwise it is illegal. */
647 /* > = 'N' The right singular vectors are not computed. */
653 /* > The number of rows of the input matrix A. M >= 0. */
659 /* > The number of columns of the input matrix A. M >= N >= 0. */
662 /* > \param[in,out] A */
664 /* > A is COMPLEX*16 array of dimensions LDA x N */
665 /* > On entry, the input matrix A. */
666 /* > On exit, if JOBU .NE. 'N' or JOBV .NE. 'N', the lower triangle of A contains */
667 /* > the Householder vectors as stored by ZGEQP3. If JOBU = 'F', these Householder */
668 /* > vectors together with CWORK(1:N) can be used to restore the Q factors from */
669 /* > the initial pivoted QR factorization of A. See the description of U. */
672 /* > \param[in] LDA */
674 /* > LDA is INTEGER. */
675 /* > The leading dimension of the array A. LDA >= f2cmax(1,M). */
678 /* > \param[out] S */
680 /* > S is DOUBLE PRECISION array of dimension N. */
681 /* > The singular values of A, ordered so that S(i) >= S(i+1). */
684 /* > \param[out] U */
686 /* > U is COMPLEX*16 array, dimension */
687 /* > LDU x M if JOBU = 'A'; see the description of LDU. In this case, */
688 /* > on exit, U contains the M left singular vectors. */
689 /* > LDU x N if JOBU = 'S', 'U', 'R' ; see the description of LDU. In this */
690 /* > case, U contains the leading N or the leading NUMRANK left singular vectors. */
691 /* > LDU x N if JOBU = 'F' ; see the description of LDU. In this case U */
692 /* > contains N x N unitary matrix that can be used to form the left */
693 /* > singular vectors. */
694 /* > If JOBU = 'N', U is not referenced. */
697 /* > \param[in] LDU */
699 /* > LDU is INTEGER. */
700 /* > The leading dimension of the array U. */
701 /* > If JOBU = 'A', 'S', 'U', 'R', LDU >= f2cmax(1,M). */
702 /* > If JOBU = 'F', LDU >= f2cmax(1,N). */
703 /* > Otherwise, LDU >= 1. */
706 /* > \param[out] V */
708 /* > V is COMPLEX*16 array, dimension */
709 /* > LDV x N if JOBV = 'A', 'V', 'R' or if JOBA = 'E' . */
710 /* > If JOBV = 'A', or 'V', V contains the N-by-N unitary matrix V**H; */
711 /* > If JOBV = 'R', V contains the first NUMRANK rows of V**H (the right */
712 /* > singular vectors, stored rowwise, of the NUMRANK largest singular values). */
713 /* > If JOBV = 'N' and JOBA = 'E', V is used as a workspace. */
714 /* > If JOBV = 'N', and JOBA.NE.'E', V is not referenced. */
717 /* > \param[in] LDV */
719 /* > LDV is INTEGER */
720 /* > The leading dimension of the array V. */
721 /* > If JOBV = 'A', 'V', 'R', or JOBA = 'E', LDV >= f2cmax(1,N). */
722 /* > Otherwise, LDV >= 1. */
725 /* > \param[out] NUMRANK */
727 /* > NUMRANK is INTEGER */
728 /* > NUMRANK is the numerical rank first determined after the rank */
729 /* > revealing QR factorization, following the strategy specified by the */
730 /* > value of JOBA. If JOBV = 'R' and JOBU = 'R', only NUMRANK */
731 /* > leading singular values and vectors are then requested in the call */
732 /* > of CGESVD. The final value of NUMRANK might be further reduced if */
733 /* > some singular values are computed as zeros. */
736 /* > \param[out] IWORK */
738 /* > IWORK is INTEGER array, dimension (f2cmax(1, LIWORK)). */
739 /* > On exit, IWORK(1:N) contains column pivoting permutation of the */
740 /* > rank revealing QR factorization. */
741 /* > If JOBP = 'P', IWORK(N+1:N+M-1) contains the indices of the sequence */
742 /* > of row swaps used in row pivoting. These can be used to restore the */
743 /* > left singular vectors in the case JOBU = 'F'. */
745 /* > If LIWORK, LCWORK, or LRWORK = -1, then on exit, if INFO = 0, */
746 /* > LIWORK(1) returns the minimal LIWORK. */
749 /* > \param[in] LIWORK */
751 /* > LIWORK is INTEGER */
752 /* > The dimension of the array IWORK. */
753 /* > LIWORK >= N + M - 1, if JOBP = 'P'; */
754 /* > LIWORK >= N if JOBP = 'N'. */
756 /* > If LIWORK = -1, then a workspace query is assumed; the routine */
757 /* > only calculates and returns the optimal and minimal sizes */
758 /* > for the CWORK, IWORK, and RWORK arrays, and no error */
759 /* > message related to LCWORK is issued by XERBLA. */
762 /* > \param[out] CWORK */
764 /* > CWORK is COMPLEX*16 array, dimension (f2cmax(2, LCWORK)), used as a workspace. */
765 /* > On exit, if, on entry, LCWORK.NE.-1, CWORK(1:N) contains parameters */
766 /* > needed to recover the Q factor from the QR factorization computed by */
769 /* > If LIWORK, LCWORK, or LRWORK = -1, then on exit, if INFO = 0, */
770 /* > CWORK(1) returns the optimal LCWORK, and */
771 /* > CWORK(2) returns the minimal LCWORK. */
774 /* > \param[in,out] LCWORK */
776 /* > LCWORK is INTEGER */
777 /* > The dimension of the array CWORK. It is determined as follows: */
778 /* > Let LWQP3 = N+1, LWCON = 2*N, and let */
779 /* > LWUNQ = { MAX( N, 1 ), if JOBU = 'R', 'S', or 'U' */
780 /* > { MAX( M, 1 ), if JOBU = 'A' */
781 /* > LWSVD = MAX( 3*N, 1 ) */
782 /* > LWLQF = MAX( N/2, 1 ), LWSVD2 = MAX( 3*(N/2), 1 ), LWUNLQ = MAX( N, 1 ), */
783 /* > LWQRF = MAX( N/2, 1 ), LWUNQ2 = MAX( N, 1 ) */
784 /* > Then the minimal value of LCWORK is: */
785 /* > = MAX( N + LWQP3, LWSVD ) if only the singular values are needed; */
786 /* > = MAX( N + LWQP3, LWCON, LWSVD ) if only the singular values are needed, */
787 /* > and a scaled condition estimate requested; */
789 /* > = N + MAX( LWQP3, LWSVD, LWUNQ ) if the singular values and the left */
790 /* > singular vectors are requested; */
791 /* > = N + MAX( LWQP3, LWCON, LWSVD, LWUNQ ) if the singular values and the left */
792 /* > singular vectors are requested, and also */
793 /* > a scaled condition estimate requested; */
795 /* > = N + MAX( LWQP3, LWSVD ) if the singular values and the right */
796 /* > singular vectors are requested; */
797 /* > = N + MAX( LWQP3, LWCON, LWSVD ) if the singular values and the right */
798 /* > singular vectors are requested, and also */
799 /* > a scaled condition estimate requested; */
801 /* > = N + MAX( LWQP3, LWSVD, LWUNQ ) if the full SVD is requested with JOBV = 'R'; */
802 /* > independent of JOBR; */
803 /* > = N + MAX( LWQP3, LWCON, LWSVD, LWUNQ ) if the full SVD is requested, */
804 /* > JOBV = 'R' and, also a scaled condition */
805 /* > estimate requested; independent of JOBR; */
806 /* > = MAX( N + MAX( LWQP3, LWSVD, LWUNQ ), */
807 /* > N + MAX( LWQP3, N/2+LWLQF, N/2+LWSVD2, N/2+LWUNLQ, LWUNQ) ) if the */
808 /* > full SVD is requested with JOBV = 'A' or 'V', and */
810 /* > = MAX( N + MAX( LWQP3, LWCON, LWSVD, LWUNQ ), */
811 /* > N + MAX( LWQP3, LWCON, N/2+LWLQF, N/2+LWSVD2, N/2+LWUNLQ, LWUNQ ) ) */
812 /* > if the full SVD is requested with JOBV = 'A' or 'V', and */
813 /* > JOBR ='N', and also a scaled condition number estimate */
815 /* > = MAX( N + MAX( LWQP3, LWSVD, LWUNQ ), */
816 /* > N + MAX( LWQP3, N/2+LWQRF, N/2+LWSVD2, N/2+LWUNQ2, LWUNQ ) ) if the */
817 /* > full SVD is requested with JOBV = 'A', 'V', and JOBR ='T' */
818 /* > = MAX( N + MAX( LWQP3, LWCON, LWSVD, LWUNQ ), */
819 /* > N + MAX( LWQP3, LWCON, N/2+LWQRF, N/2+LWSVD2, N/2+LWUNQ2, LWUNQ ) ) */
820 /* > if the full SVD is requested with JOBV = 'A', 'V' and */
821 /* > JOBR ='T', and also a scaled condition number estimate */
823 /* > Finally, LCWORK must be at least two: LCWORK = MAX( 2, LCWORK ). */
825 /* > If LCWORK = -1, then a workspace query is assumed; the routine */
826 /* > only calculates and returns the optimal and minimal sizes */
827 /* > for the CWORK, IWORK, and RWORK arrays, and no error */
828 /* > message related to LCWORK is issued by XERBLA. */
831 /* > \param[out] RWORK */
833 /* > RWORK is DOUBLE PRECISION array, dimension (f2cmax(1, LRWORK)). */
835 /* > 1. If JOBA = 'E', RWORK(1) contains an estimate of the condition */
836 /* > number of column scaled A. If A = C * D where D is diagonal and C */
837 /* > has unit columns in the Euclidean norm, then, assuming full column rank, */
838 /* > N^(-1/4) * RWORK(1) <= ||pinv(C)||_2 <= N^(1/4) * RWORK(1). */
839 /* > Otherwise, RWORK(1) = -1. */
840 /* > 2. RWORK(2) contains the number of singular values computed as */
841 /* > exact zeros in ZGESVD applied to the upper triangular or trapezoidal */
842 /* > R (from the initial QR factorization). In case of early exit (no call to */
843 /* > ZGESVD, such as in the case of zero matrix) RWORK(2) = -1. */
845 /* > If LIWORK, LCWORK, or LRWORK = -1, then on exit, if INFO = 0, */
846 /* > RWORK(1) returns the minimal LRWORK. */
849 /* > \param[in] LRWORK */
851 /* > LRWORK is INTEGER. */
852 /* > The dimension of the array RWORK. */
853 /* > If JOBP ='P', then LRWORK >= MAX(2, M, 5*N); */
854 /* > Otherwise, LRWORK >= MAX(2, 5*N). */
856 /* > If LRWORK = -1, then a workspace query is assumed; the routine */
857 /* > only calculates and returns the optimal and minimal sizes */
858 /* > for the CWORK, IWORK, and RWORK arrays, and no error */
859 /* > message related to LCWORK is issued by XERBLA. */
862 /* > \param[out] INFO */
864 /* > INFO is INTEGER */
865 /* > = 0: successful exit. */
866 /* > < 0: if INFO = -i, the i-th argument had an illegal value. */
867 /* > > 0: if ZBDSQR did not converge, INFO specifies how many superdiagonals */
868 /* > of an intermediate bidiagonal form B (computed in ZGESVD) did not */
869 /* > converge to zero. */
872 /* > \par Further Details: */
873 /* ======================== */
877 /* > 1. The data movement (matrix transpose) is coded using simple nested */
878 /* > DO-loops because BLAS and LAPACK do not provide corresponding subroutines. */
879 /* > Those DO-loops are easily identified in this source code - by the CONTINUE */
880 /* > statements labeled with 11**. In an optimized version of this code, the */
881 /* > nested DO loops should be replaced with calls to an optimized subroutine. */
882 /* > 2. This code scales A by 1/SQRT(M) if the largest ABS(A(i,j)) could cause */
883 /* > column norm overflow. This is the minimal precaution and it is left to the */
884 /* > SVD routine (CGESVD) to do its own preemptive scaling if potential over- */
885 /* > or underflows are detected. To avoid repeated scanning of the array A, */
886 /* > an optimal implementation would do all necessary scaling before calling */
887 /* > CGESVD and the scaling in CGESVD can be switched off. */
888 /* > 3. Other comments related to code optimization are given in comments in the */
889 /* > code, enclosed in [[double brackets]]. */
892 /* > \par Bugs, examples and comments */
893 /* =========================== */
896 /* > Please report all bugs and send interesting examples and/or comments to */
897 /* > drmac@math.hr. Thank you. */
900 /* > \par References */
901 /* =============== */
904 /* > [1] Zlatko Drmac, Algorithm 977: A QR-Preconditioned QR SVD Method for */
905 /* > Computing the SVD with High Accuracy. ACM Trans. Math. Softw. */
906 /* > 44(1): 11:1-11:30 (2017) */
908 /* > SIGMA library, xGESVDQ section updated February 2016. */
909 /* > Developed and coded by Zlatko Drmac, Department of Mathematics */
910 /* > University of Zagreb, Croatia, drmac@math.hr */
914 /* > \par Contributors: */
915 /* ================== */
918 /* > Developed and coded by Zlatko Drmac, Department of Mathematics */
919 /* > University of Zagreb, Croatia, drmac@math.hr */
925 /* > \author Univ. of Tennessee */
926 /* > \author Univ. of California Berkeley */
927 /* > \author Univ. of Colorado Denver */
928 /* > \author NAG Ltd. */
930 /* > \date November 2018 */
932 /* > \ingroup complex16GEsing */
934 /* ===================================================================== */
935 /* Subroutine */ int zgesvdq_(char *joba, char *jobp, char *jobr, char *jobu,
936 char *jobv, integer *m, integer *n, doublecomplex *a, integer *lda,
937 doublereal *s, doublecomplex *u, integer *ldu, doublecomplex *v,
938 integer *ldv, integer *numrank, integer *iwork, integer *liwork,
939 doublecomplex *cwork, integer *lcwork, doublereal *rwork, integer *
940 lrwork, integer *info)
942 /* System generated locals */
943 integer a_dim1, a_offset, u_dim1, u_offset, v_dim1, v_offset, i__1, i__2,
948 /* Local variables */
949 integer lwrk_zunmlq__, lwrk_zunmqr__, ierr;
951 integer lwrk_zgesvd2__;
953 integer lwrk_zunmqr2__, optratio;
954 logical lsvc0, accla;
956 logical acclh, acclm;
959 extern logical lsame_(char *, char *);
961 doublereal sfmin, epsln;
964 integer lwlqf, lwqrf;
967 logical dntwu, dntwv, wntuf, wntva;
969 logical wntur, wntus, wntvr;
970 extern /* Subroutine */ int zgeqp3_(integer *, integer *, doublecomplex *,
971 integer *, integer *, doublecomplex *, doublecomplex *, integer *
972 , doublereal *, integer *);
973 extern doublereal dznrm2_(integer *, doublecomplex *, integer *);
974 integer lwsvd2, lwunq2;
975 extern doublereal dlamch_(char *);
977 extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
978 doublereal *, doublereal *, integer *, integer *, doublereal *,
979 integer *, integer *);
980 extern integer idamax_(integer *, doublereal *, integer *);
982 extern /* Subroutine */ int dlaset_(char *, integer *, integer *,
983 doublereal *, doublereal *, doublereal *, integer *),
984 xerbla_(char *, integer *, ftnlen), zdscal_(integer *, doublereal
985 *, doublecomplex *, integer *);
986 extern doublereal zlange_(char *, integer *, integer *, doublecomplex *,
987 integer *, doublereal *);
988 extern /* Subroutine */ int zgelqf_(integer *, integer *, doublecomplex *,
989 integer *, doublecomplex *, doublecomplex *, integer *, integer *
990 ), zlascl_(char *, integer *, integer *, doublereal *, doublereal
991 *, integer *, integer *, doublecomplex *, integer *, integer *);
992 doublecomplex cdummy[1];
993 extern /* Subroutine */ int zgeqrf_(integer *, integer *, doublecomplex *,
994 integer *, doublecomplex *, doublecomplex *, integer *, integer *
995 ), zgesvd_(char *, char *, integer *, integer *, doublecomplex *,
996 integer *, doublereal *, doublecomplex *, integer *,
997 doublecomplex *, integer *, doublecomplex *, integer *,
998 doublereal *, integer *), zlacpy_(char *, integer
999 *, integer *, doublecomplex *, integer *, doublecomplex *,
1000 integer *), zlaset_(char *, integer *, integer *,
1001 doublecomplex *, doublecomplex *, doublecomplex *, integer *);
1004 extern /* Subroutine */ int zlapmt_(logical *, integer *, integer *,
1005 doublecomplex *, integer *, integer *), zpocon_(char *, integer *,
1006 doublecomplex *, integer *, doublereal *, doublereal *,
1007 doublecomplex *, doublereal *, integer *);
1008 doublereal rdummy[1];
1011 extern /* Subroutine */ int zlaswp_(integer *, doublecomplex *, integer *,
1012 integer *, integer *, integer *, integer *);
1015 extern /* Subroutine */ int zunmlq_(char *, char *, integer *, integer *,
1016 integer *, doublecomplex *, integer *, doublecomplex *,
1017 doublecomplex *, integer *, doublecomplex *, integer *, integer *), zunmqr_(char *, char *, integer *, integer *,
1018 integer *, doublecomplex *, integer *, doublecomplex *,
1019 doublecomplex *, integer *, doublecomplex *, integer *, integer *);
1023 integer optwrk2, lwrk_zgeqp3__, iminwrk, rminwrk, lwrk_zgelqf__,
1024 lwrk_zgeqrf__, lwrk_zgesvd__;
1027 /* ===================================================================== */
1030 /* Test the input arguments */
1032 /* Parameter adjustments */
1034 a_offset = 1 + a_dim1 * 1;
1038 u_offset = 1 + u_dim1 * 1;
1041 v_offset = 1 + v_dim1 * 1;
1048 wntus = lsame_(jobu, "S") || lsame_(jobu, "U");
1049 wntur = lsame_(jobu, "R");
1050 wntua = lsame_(jobu, "A");
1051 wntuf = lsame_(jobu, "F");
1052 lsvc0 = wntus || wntur || wntua;
1053 lsvec = lsvc0 || wntuf;
1054 dntwu = lsame_(jobu, "N");
1056 wntvr = lsame_(jobv, "R");
1057 wntva = lsame_(jobv, "A") || lsame_(jobv, "V");
1058 rsvec = wntvr || wntva;
1059 dntwv = lsame_(jobv, "N");
1061 accla = lsame_(joba, "A");
1062 acclm = lsame_(joba, "M");
1063 conda = lsame_(joba, "E");
1064 acclh = lsame_(joba, "H") || conda;
1066 rowprm = lsame_(jobp, "P");
1067 rtrans = lsame_(jobr, "T");
1071 i__1 = 1, i__2 = *n + *m - 1;
1072 iminwrk = f2cmax(i__1,i__2);
1074 i__1 = f2cmax(2,*m), i__2 = *n * 5;
1075 rminwrk = f2cmax(i__1,i__2);
1077 iminwrk = f2cmax(1,*n);
1079 i__1 = 2, i__2 = *n * 5;
1080 rminwrk = f2cmax(i__1,i__2);
1082 lquery = *liwork == -1 || *lcwork == -1 || *lrwork == -1;
1084 if (! (accla || acclm || acclh)) {
1086 } else if (! (rowprm || lsame_(jobp, "N"))) {
1088 } else if (! (rtrans || lsame_(jobr, "N"))) {
1090 } else if (! (lsvec || dntwu)) {
1092 } else if (wntur && wntva) {
1094 } else if (! (rsvec || dntwv)) {
1096 } else if (*m < 0) {
1098 } else if (*n < 0 || *n > *m) {
1100 } else if (*lda < f2cmax(1,*m)) {
1102 } else if (*ldu < 1 || lsvc0 && *ldu < *m || wntuf && *ldu < *n) {
1104 } else if (*ldv < 1 || rsvec && *ldv < *n || conda && *ldv < *n) {
1106 } else if (*liwork < iminwrk && ! lquery) {
1112 /* [[The expressions for computing the minimal and the optimal */
1113 /* values of LCWORK are written with a lot of redundancy and */
1114 /* can be simplified. However, this detailed form is easier for */
1115 /* maintenance and modifications of the code.]] */
1118 if (wntus || wntur) {
1119 lwunq = f2cmax(*n,1);
1121 lwunq = f2cmax(*m,1);
1126 lwsvd = f2cmax(i__1,1);
1128 zgeqp3_(m, n, &a[a_offset], lda, &iwork[1], cdummy, cdummy, &c_n1,
1130 lwrk_zgeqp3__ = (integer) cdummy[0].r;
1131 if (wntus || wntur) {
1132 zunmqr_("L", "N", m, n, n, &a[a_offset], lda, cdummy, &u[
1133 u_offset], ldu, cdummy, &c_n1, &ierr);
1134 lwrk_zunmqr__ = (integer) cdummy[0].r;
1136 zunmqr_("L", "N", m, m, n, &a[a_offset], lda, cdummy, &u[
1137 u_offset], ldu, cdummy, &c_n1, &ierr);
1138 lwrk_zunmqr__ = (integer) cdummy[0].r;
1145 if (! (lsvec || rsvec)) {
1146 /* only the singular values are requested */
1149 i__1 = *n + lwqp3, i__1 = f2cmax(i__1,lwcon);
1150 minwrk = f2cmax(i__1,lwsvd);
1154 minwrk = f2cmax(i__1,lwsvd);
1157 zgesvd_("N", "N", n, n, &a[a_offset], lda, &s[1], &u[u_offset]
1158 , ldu, &v[v_offset], ldv, cdummy, &c_n1, rdummy, &
1160 lwrk_zgesvd__ = (integer) cdummy[0].r;
1163 i__1 = *n + lwrk_zgeqp3__, i__2 = *n + lwcon, i__1 = f2cmax(
1165 optwrk = f2cmax(i__1,lwrk_zgesvd__);
1168 i__1 = *n + lwrk_zgeqp3__;
1169 optwrk = f2cmax(i__1,lwrk_zgesvd__);
1172 } else if (lsvec && ! rsvec) {
1173 /* singular values and the left singular vectors are requested */
1176 i__1 = f2cmax(lwqp3,lwcon), i__1 = f2cmax(i__1,lwsvd);
1177 minwrk = *n + f2cmax(i__1,lwunq);
1180 i__1 = f2cmax(lwqp3,lwsvd);
1181 minwrk = *n + f2cmax(i__1,lwunq);
1185 zgesvd_("N", "O", n, n, &a[a_offset], lda, &s[1], &u[
1186 u_offset], ldu, &v[v_offset], ldv, cdummy, &c_n1,
1189 zgesvd_("O", "N", n, n, &a[a_offset], lda, &s[1], &u[
1190 u_offset], ldu, &v[v_offset], ldv, cdummy, &c_n1,
1193 lwrk_zgesvd__ = (integer) cdummy[0].r;
1196 i__1 = f2cmax(lwrk_zgeqp3__,lwcon), i__1 = f2cmax(i__1,
1198 optwrk = *n + f2cmax(i__1,lwrk_zunmqr__);
1201 i__1 = f2cmax(lwrk_zgeqp3__,lwrk_zgesvd__);
1202 optwrk = *n + f2cmax(i__1,lwrk_zunmqr__);
1205 } else if (rsvec && ! lsvec) {
1206 /* singular values and the right singular vectors are requested */
1209 i__1 = f2cmax(lwqp3,lwcon);
1210 minwrk = *n + f2cmax(i__1,lwsvd);
1212 minwrk = *n + f2cmax(lwqp3,lwsvd);
1216 zgesvd_("O", "N", n, n, &a[a_offset], lda, &s[1], &u[
1217 u_offset], ldu, &v[v_offset], ldv, cdummy, &c_n1,
1220 zgesvd_("N", "O", n, n, &a[a_offset], lda, &s[1], &u[
1221 u_offset], ldu, &v[v_offset], ldv, cdummy, &c_n1,
1224 lwrk_zgesvd__ = (integer) cdummy[0].r;
1227 i__1 = f2cmax(lwrk_zgeqp3__,lwcon);
1228 optwrk = *n + f2cmax(i__1,lwrk_zgesvd__);
1230 optwrk = *n + f2cmax(lwrk_zgeqp3__,lwrk_zgesvd__);
1234 /* full SVD is requested */
1237 i__1 = f2cmax(lwqp3,lwsvd);
1238 minwrk = f2cmax(i__1,lwunq);
1240 minwrk = f2cmax(minwrk,lwcon);
1246 lwqrf = f2cmax(i__1,1);
1249 lwsvd2 = f2cmax(i__1,1);
1250 lwunq2 = f2cmax(*n,1);
1252 i__1 = lwqp3, i__2 = *n / 2 + lwqrf, i__1 = f2cmax(i__1,i__2)
1253 , i__2 = *n / 2 + lwsvd2, i__1 = f2cmax(i__1,i__2),
1254 i__2 = *n / 2 + lwunq2, i__1 = f2cmax(i__1,i__2);
1255 minwrk2 = f2cmax(i__1,lwunq);
1257 minwrk2 = f2cmax(minwrk2,lwcon);
1259 minwrk2 = *n + minwrk2;
1260 minwrk = f2cmax(minwrk,minwrk2);
1264 i__1 = f2cmax(lwqp3,lwsvd);
1265 minwrk = f2cmax(i__1,lwunq);
1267 minwrk = f2cmax(minwrk,lwcon);
1273 lwlqf = f2cmax(i__1,1);
1276 lwsvd2 = f2cmax(i__1,1);
1277 lwunlq = f2cmax(*n,1);
1279 i__1 = lwqp3, i__2 = *n / 2 + lwlqf, i__1 = f2cmax(i__1,i__2)
1280 , i__2 = *n / 2 + lwsvd2, i__1 = f2cmax(i__1,i__2),
1281 i__2 = *n / 2 + lwunlq, i__1 = f2cmax(i__1,i__2);
1282 minwrk2 = f2cmax(i__1,lwunq);
1284 minwrk2 = f2cmax(minwrk2,lwcon);
1286 minwrk2 = *n + minwrk2;
1287 minwrk = f2cmax(minwrk,minwrk2);
1292 zgesvd_("O", "A", n, n, &a[a_offset], lda, &s[1], &u[
1293 u_offset], ldu, &v[v_offset], ldv, cdummy, &c_n1,
1295 lwrk_zgesvd__ = (integer) cdummy[0].r;
1297 i__1 = f2cmax(lwrk_zgeqp3__,lwrk_zgesvd__);
1298 optwrk = f2cmax(i__1,lwrk_zunmqr__);
1300 optwrk = f2cmax(optwrk,lwcon);
1302 optwrk = *n + optwrk;
1305 zgeqrf_(n, &i__1, &u[u_offset], ldu, cdummy, cdummy, &
1307 lwrk_zgeqrf__ = (integer) cdummy[0].r;
1310 zgesvd_("S", "O", &i__1, &i__2, &v[v_offset], ldv, &s[
1311 1], &u[u_offset], ldu, &v[v_offset], ldv,
1312 cdummy, &c_n1, rdummy, &ierr);
1313 lwrk_zgesvd2__ = (integer) cdummy[0].r;
1315 zunmqr_("R", "C", n, n, &i__1, &u[u_offset], ldu,
1316 cdummy, &v[v_offset], ldv, cdummy, &c_n1, &
1318 lwrk_zunmqr2__ = (integer) cdummy[0].r;
1320 i__1 = lwrk_zgeqp3__, i__2 = *n / 2 + lwrk_zgeqrf__,
1321 i__1 = f2cmax(i__1,i__2), i__2 = *n / 2 +
1322 lwrk_zgesvd2__, i__1 = f2cmax(i__1,i__2), i__2 =
1323 *n / 2 + lwrk_zunmqr2__;
1324 optwrk2 = f2cmax(i__1,i__2);
1326 optwrk2 = f2cmax(optwrk2,lwcon);
1328 optwrk2 = *n + optwrk2;
1329 optwrk = f2cmax(optwrk,optwrk2);
1332 zgesvd_("S", "O", n, n, &a[a_offset], lda, &s[1], &u[
1333 u_offset], ldu, &v[v_offset], ldv, cdummy, &c_n1,
1335 lwrk_zgesvd__ = (integer) cdummy[0].r;
1337 i__1 = f2cmax(lwrk_zgeqp3__,lwrk_zgesvd__);
1338 optwrk = f2cmax(i__1,lwrk_zunmqr__);
1340 optwrk = f2cmax(optwrk,lwcon);
1342 optwrk = *n + optwrk;
1345 zgelqf_(&i__1, n, &u[u_offset], ldu, cdummy, cdummy, &
1347 lwrk_zgelqf__ = (integer) cdummy[0].r;
1350 zgesvd_("S", "O", &i__1, &i__2, &v[v_offset], ldv, &s[
1351 1], &u[u_offset], ldu, &v[v_offset], ldv,
1352 cdummy, &c_n1, rdummy, &ierr);
1353 lwrk_zgesvd2__ = (integer) cdummy[0].r;
1355 zunmlq_("R", "N", n, n, &i__1, &u[u_offset], ldu,
1356 cdummy, &v[v_offset], ldv, cdummy, &c_n1, &
1358 lwrk_zunmlq__ = (integer) cdummy[0].r;
1360 i__1 = lwrk_zgeqp3__, i__2 = *n / 2 + lwrk_zgelqf__,
1361 i__1 = f2cmax(i__1,i__2), i__2 = *n / 2 +
1362 lwrk_zgesvd2__, i__1 = f2cmax(i__1,i__2), i__2 =
1363 *n / 2 + lwrk_zunmlq__;
1364 optwrk2 = f2cmax(i__1,i__2);
1366 optwrk2 = f2cmax(optwrk2,lwcon);
1368 optwrk2 = *n + optwrk2;
1369 optwrk = f2cmax(optwrk,optwrk2);
1375 minwrk = f2cmax(2,minwrk);
1376 optwrk = f2cmax(2,optwrk);
1377 if (*lcwork < minwrk && ! lquery) {
1383 if (*info == 0 && *lrwork < rminwrk && ! lquery) {
1388 xerbla_("ZGESVDQ", &i__1, (ftnlen)7);
1390 } else if (lquery) {
1392 /* Return optimal workspace */
1395 cwork[1].r = (doublereal) optwrk, cwork[1].i = 0.;
1396 cwork[2].r = (doublereal) minwrk, cwork[2].i = 0.;
1397 rwork[1] = (doublereal) rminwrk;
1401 /* Quick return if the matrix is void. */
1403 if (*m == 0 || *n == 0) {
1410 /* ell-infinity norm - this enhances numerical robustness in */
1411 /* the case of differently scaled rows. */
1413 for (p = 1; p <= i__1; ++p) {
1414 /* RWORK(p) = ABS( A(p,IZAMAX(N,A(p,1),LDA)) ) */
1415 /* [[ZLANGE will return NaN if an entry of the p-th row is Nan]] */
1416 rwork[p] = zlange_("M", &c__1, n, &a[p + a_dim1], lda, rdummy);
1417 if (rwork[p] != rwork[p] || rwork[p] * 0. != 0.) {
1420 xerbla_("ZGESVDQ", &i__2, (ftnlen)7);
1426 for (p = 1; p <= i__1; ++p) {
1428 q = idamax_(&i__2, &rwork[p], &c__1) + p - 1;
1432 rwork[p] = rwork[q];
1438 if (rwork[1] == 0.) {
1439 /* Quick return: A is the M x N zero matrix. */
1441 dlaset_("G", n, &c__1, &c_b74, &c_b74, &s[1], n);
1443 zlaset_("G", m, n, &c_b1, &c_b2, &u[u_offset], ldu)
1447 zlaset_("G", m, m, &c_b1, &c_b2, &u[u_offset], ldu)
1451 zlaset_("G", n, n, &c_b1, &c_b2, &v[v_offset], ldv)
1455 zlaset_("G", n, &c__1, &c_b1, &c_b1, &cwork[1], n);
1456 zlaset_("G", m, n, &c_b1, &c_b2, &u[u_offset], ldu)
1460 for (p = 1; p <= i__1; ++p) {
1466 for (p = *n + 1; p <= i__1; ++p) {
1478 if (rwork[1] > big / sqrt((doublereal) (*m))) {
1479 /* matrix by 1/sqrt(M) if too large entry detected */
1480 d__1 = sqrt((doublereal) (*m));
1481 zlascl_("G", &c__0, &c__0, &d__1, &c_b87, m, n, &a[a_offset], lda,
1486 zlaswp_(n, &a[a_offset], lda, &c__1, &i__1, &iwork[*n + 1], &c__1);
1489 /* norms overflows during the QR factorization. The SVD procedure should */
1490 /* have its own scaling to save the singular values from overflows and */
1491 /* underflows. That depends on the SVD procedure. */
1494 rtmp = zlange_("M", m, n, &a[a_offset], lda, &rwork[1]);
1495 if (rtmp != rtmp || rtmp * 0. != 0.) {
1498 xerbla_("ZGESVDQ", &i__1, (ftnlen)7);
1501 if (rtmp > big / sqrt((doublereal) (*m))) {
1502 /* matrix by 1/sqrt(M) if too large entry detected */
1503 d__1 = sqrt((doublereal) (*m));
1504 zlascl_("G", &c__0, &c__0, &d__1, &c_b87, m, n, &a[a_offset], lda,
1511 /* A * P = Q * [ R ] */
1515 for (p = 1; p <= i__1; ++p) {
1519 i__1 = *lcwork - *n;
1520 zgeqp3_(m, n, &a[a_offset], lda, &iwork[1], &cwork[1], &cwork[*n + 1], &
1521 i__1, &rwork[1], &ierr);
1523 /* If the user requested accuracy level allows truncation in the */
1524 /* computed upper triangular factor, the matrix R is examined and, */
1525 /* if possible, replaced with its leading upper trapezoidal part. */
1527 epsln = dlamch_("E");
1528 sfmin = dlamch_("S");
1529 /* SMALL = SFMIN / EPSLN */
1534 /* Standard absolute error bound suffices. All sigma_i with */
1535 /* sigma_i < N*EPS*||A||_F are flushed to zero. This is an */
1536 /* aggressive enforcement of lower numerical rank by introducing a */
1537 /* backward error of the order of N*EPS*||A||_F. */
1539 rtmp = sqrt((doublereal) (*n)) * epsln;
1541 for (p = 2; p <= i__1; ++p) {
1542 if (z_abs(&a[p + p * a_dim1]) < rtmp * z_abs(&a[a_dim1 + 1])) {
1552 /* Sudden drop on the diagonal of R is used as the criterion for being */
1553 /* close-to-rank-deficient. The threshold is set to EPSLN=DLAMCH('E'). */
1554 /* [[This can be made more flexible by replacing this hard-coded value */
1555 /* with a user specified threshold.]] Also, the values that underflow */
1556 /* will be truncated. */
1559 for (p = 2; p <= i__1; ++p) {
1560 if (z_abs(&a[p + p * a_dim1]) < epsln * z_abs(&a[p - 1 + (p - 1) *
1561 a_dim1]) || z_abs(&a[p + p * a_dim1]) < sfmin) {
1571 /* obvious case of zero pivots. */
1572 /* R(i,i)=0 => R(i:N,i:N)=0. */
1575 for (p = 2; p <= i__1; ++p) {
1576 if (z_abs(&a[p + p * a_dim1]) == 0.) {
1585 /* Estimate the scaled condition number of A. Use the fact that it is */
1586 /* the same as the scaled condition number of R. */
1587 zlacpy_("U", n, n, &a[a_offset], lda, &v[v_offset], ldv);
1588 /* Only the leading NR x NR submatrix of the triangular factor */
1589 /* is considered. Only if NR=N will this give a reliable error */
1590 /* bound. However, even for NR < N, this can be used on an */
1591 /* expert level and obtain useful information in the sense of */
1592 /* perturbation theory. */
1594 for (p = 1; p <= i__1; ++p) {
1595 rtmp = dznrm2_(&p, &v[p * v_dim1 + 1], &c__1);
1597 zdscal_(&p, &d__1, &v[p * v_dim1 + 1], &c__1);
1600 if (! (lsvec || rsvec)) {
1601 zpocon_("U", &nr, &v[v_offset], ldv, &c_b87, &rtmp, &cwork[1],
1604 zpocon_("U", &nr, &v[v_offset], ldv, &c_b87, &rtmp, &cwork[*n
1605 + 1], &rwork[1], &ierr);
1607 sconda = 1. / sqrt(rtmp);
1608 /* For NR=N, SCONDA is an estimate of SQRT(||(R^* * R)^(-1)||_1), */
1609 /* N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA */
1610 /* See the reference [1] for more details. */
1617 } else if (wntus || wntuf) {
1623 if (! (rsvec || lsvec)) {
1624 /* ....................................................................... */
1625 /* ....................................................................... */
1628 /* the upper triangle of [A] to zero. */
1629 i__1 = f2cmin(*n,nr);
1630 for (p = 1; p <= i__1; ++p) {
1631 i__2 = p + p * a_dim1;
1632 d_cnjg(&z__1, &a[p + p * a_dim1]);
1633 a[i__2].r = z__1.r, a[i__2].i = z__1.i;
1635 for (q = p + 1; q <= i__2; ++q) {
1636 i__3 = q + p * a_dim1;
1637 d_cnjg(&z__1, &a[p + q * a_dim1]);
1638 a[i__3].r = z__1.r, a[i__3].i = z__1.i;
1640 i__3 = p + q * a_dim1;
1641 a[i__3].r = 0., a[i__3].i = 0.;
1648 zgesvd_("N", "N", n, &nr, &a[a_offset], lda, &s[1], &u[u_offset],
1649 ldu, &v[v_offset], ldv, &cwork[1], lcwork, &rwork[1],
1658 zlaset_("L", &i__1, &i__2, &c_b1, &c_b1, &a[a_dim1 + 2], lda);
1660 zgesvd_("N", "N", &nr, n, &a[a_offset], lda, &s[1], &u[u_offset],
1661 ldu, &v[v_offset], ldv, &cwork[1], lcwork, &rwork[1],
1666 } else if (lsvec && ! rsvec) {
1667 /* ....................................................................... */
1668 /* ....................................................................... */
1672 for (p = 1; p <= i__1; ++p) {
1674 for (q = p; q <= i__2; ++q) {
1675 i__3 = q + p * u_dim1;
1676 d_cnjg(&z__1, &a[p + q * a_dim1]);
1677 u[i__3].r = z__1.r, u[i__3].i = z__1.i;
1685 zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &u[(u_dim1 << 1) + 1]
1688 /* vectors overwrite [U](1:NR,1:NR) as conjugate transposed. These */
1689 /* will be pre-multiplied by Q to build the left singular vectors of A. */
1690 i__1 = *lcwork - *n;
1691 zgesvd_("N", "O", n, &nr, &u[u_offset], ldu, &s[1], &u[u_offset],
1692 ldu, &u[u_offset], ldu, &cwork[*n + 1], &i__1, &rwork[1],
1696 for (p = 1; p <= i__1; ++p) {
1697 i__2 = p + p * u_dim1;
1698 d_cnjg(&z__1, &u[p + p * u_dim1]);
1699 u[i__2].r = z__1.r, u[i__2].i = z__1.i;
1701 for (q = p + 1; q <= i__2; ++q) {
1702 d_cnjg(&z__1, &u[q + p * u_dim1]);
1703 ctmp.r = z__1.r, ctmp.i = z__1.i;
1704 i__3 = q + p * u_dim1;
1705 d_cnjg(&z__1, &u[p + q * u_dim1]);
1706 u[i__3].r = z__1.r, u[i__3].i = z__1.i;
1707 i__3 = p + q * u_dim1;
1708 u[i__3].r = ctmp.r, u[i__3].i = ctmp.i;
1715 zlacpy_("U", &nr, n, &a[a_offset], lda, &u[u_offset], ldu);
1719 zlaset_("L", &i__1, &i__2, &c_b1, &c_b1, &u[u_dim1 + 2], ldu);
1721 /* vectors overwrite [U](1:NR,1:NR) */
1722 i__1 = *lcwork - *n;
1723 zgesvd_("O", "N", &nr, n, &u[u_offset], ldu, &s[1], &u[u_offset],
1724 ldu, &v[v_offset], ldv, &cwork[*n + 1], &i__1, &rwork[1],
1726 /* R. These will be pre-multiplied by Q to build the left singular */
1730 /* (M x NR) or (M x N) or (M x M). */
1731 if (nr < *m && ! wntuf) {
1733 zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &u[nr + 1 + u_dim1], ldu);
1736 zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &u[(nr + 1) * u_dim1 +
1740 zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[nr + 1 + (nr + 1)
1745 /* The Q matrix from the first QRF is built into the left singular */
1746 /* vectors matrix U. */
1749 i__1 = *lcwork - *n;
1750 zunmqr_("L", "N", m, &n1, n, &a[a_offset], lda, &cwork[1], &u[
1751 u_offset], ldu, &cwork[*n + 1], &i__1, &ierr);
1753 if (rowprm && ! wntuf) {
1755 zlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[*n + 1], &
1759 } else if (rsvec && ! lsvec) {
1760 /* ....................................................................... */
1761 /* ....................................................................... */
1764 for (p = 1; p <= i__1; ++p) {
1766 for (q = p; q <= i__2; ++q) {
1767 i__3 = q + p * v_dim1;
1768 d_cnjg(&z__1, &a[p + q * a_dim1]);
1769 v[i__3].r = z__1.r, v[i__3].i = z__1.i;
1777 zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1) + 1]
1780 /* vectors not computed */
1781 if (wntvr || nr == *n) {
1782 i__1 = *lcwork - *n;
1783 zgesvd_("O", "N", n, &nr, &v[v_offset], ldv, &s[1], &u[
1784 u_offset], ldu, &u[u_offset], ldu, &cwork[*n + 1], &
1785 i__1, &rwork[1], info);
1788 for (p = 1; p <= i__1; ++p) {
1789 i__2 = p + p * v_dim1;
1790 d_cnjg(&z__1, &v[p + p * v_dim1]);
1791 v[i__2].r = z__1.r, v[i__2].i = z__1.i;
1793 for (q = p + 1; q <= i__2; ++q) {
1794 d_cnjg(&z__1, &v[q + p * v_dim1]);
1795 ctmp.r = z__1.r, ctmp.i = z__1.i;
1796 i__3 = q + p * v_dim1;
1797 d_cnjg(&z__1, &v[p + q * v_dim1]);
1798 v[i__3].r = z__1.r, v[i__3].i = z__1.i;
1799 i__3 = p + q * v_dim1;
1800 v[i__3].r = ctmp.r, v[i__3].i = ctmp.i;
1808 for (p = 1; p <= i__1; ++p) {
1810 for (q = nr + 1; q <= i__2; ++q) {
1811 i__3 = p + q * v_dim1;
1812 d_cnjg(&z__1, &v[q + p * v_dim1]);
1813 v[i__3].r = z__1.r, v[i__3].i = z__1.i;
1819 zlapmt_(&c_false, &nr, n, &v[v_offset], ldv, &iwork[1]);
1821 /* [!] This is simple implementation that augments [V](1:N,1:NR) */
1822 /* by padding a zero block. In the case NR << N, a more efficient */
1823 /* way is to first use the QR factorization. For more details */
1824 /* how to implement this, see the " FULL SVD " branch. */
1826 zlaset_("G", n, &i__1, &c_b1, &c_b1, &v[(nr + 1) * v_dim1 + 1]
1828 i__1 = *lcwork - *n;
1829 zgesvd_("O", "N", n, n, &v[v_offset], ldv, &s[1], &u[u_offset]
1830 , ldu, &u[u_offset], ldu, &cwork[*n + 1], &i__1, &
1834 for (p = 1; p <= i__1; ++p) {
1835 i__2 = p + p * v_dim1;
1836 d_cnjg(&z__1, &v[p + p * v_dim1]);
1837 v[i__2].r = z__1.r, v[i__2].i = z__1.i;
1839 for (q = p + 1; q <= i__2; ++q) {
1840 d_cnjg(&z__1, &v[q + p * v_dim1]);
1841 ctmp.r = z__1.r, ctmp.i = z__1.i;
1842 i__3 = q + p * v_dim1;
1843 d_cnjg(&z__1, &v[p + q * v_dim1]);
1844 v[i__3].r = z__1.r, v[i__3].i = z__1.i;
1845 i__3 = p + q * v_dim1;
1846 v[i__3].r = ctmp.r, v[i__3].i = ctmp.i;
1851 zlapmt_(&c_false, n, n, &v[v_offset], ldv, &iwork[1]);
1855 zlacpy_("U", &nr, n, &a[a_offset], lda, &v[v_offset], ldv);
1859 zlaset_("L", &i__1, &i__2, &c_b1, &c_b1, &v[v_dim1 + 2], ldv);
1861 /* vectors stored in U(1:NR,1:NR) */
1862 if (wntvr || nr == *n) {
1863 i__1 = *lcwork - *n;
1864 zgesvd_("N", "O", &nr, n, &v[v_offset], ldv, &s[1], &u[
1865 u_offset], ldu, &v[v_offset], ldv, &cwork[*n + 1], &
1866 i__1, &rwork[1], info);
1867 zlapmt_(&c_false, &nr, n, &v[v_offset], ldv, &iwork[1]);
1869 /* [!] This is simple implementation that augments [V](1:NR,1:N) */
1870 /* by padding a zero block. In the case NR << N, a more efficient */
1871 /* way is to first use the LQ factorization. For more details */
1872 /* how to implement this, see the " FULL SVD " branch. */
1874 zlaset_("G", &i__1, n, &c_b1, &c_b1, &v[nr + 1 + v_dim1], ldv);
1875 i__1 = *lcwork - *n;
1876 zgesvd_("N", "O", n, n, &v[v_offset], ldv, &s[1], &u[u_offset]
1877 , ldu, &v[v_offset], ldv, &cwork[*n + 1], &i__1, &
1879 zlapmt_(&c_false, n, n, &v[v_offset], ldv, &iwork[1]);
1885 /* ....................................................................... */
1886 /* ....................................................................... */
1890 if (wntvr || nr == *n) {
1891 /* vectors of R**H */
1893 for (p = 1; p <= i__1; ++p) {
1895 for (q = p; q <= i__2; ++q) {
1896 i__3 = q + p * v_dim1;
1897 d_cnjg(&z__1, &a[p + q * a_dim1]);
1898 v[i__3].r = z__1.r, v[i__3].i = z__1.i;
1906 zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1)
1910 /* singular vectors of R**H stored in [U](1:NR,1:NR) as conjugate */
1912 i__1 = *lcwork - *n;
1913 zgesvd_("O", "A", n, &nr, &v[v_offset], ldv, &s[1], &v[
1914 v_offset], ldv, &u[u_offset], ldu, &cwork[*n + 1], &
1915 i__1, &rwork[1], info);
1917 for (p = 1; p <= i__1; ++p) {
1918 i__2 = p + p * v_dim1;
1919 d_cnjg(&z__1, &v[p + p * v_dim1]);
1920 v[i__2].r = z__1.r, v[i__2].i = z__1.i;
1922 for (q = p + 1; q <= i__2; ++q) {
1923 d_cnjg(&z__1, &v[q + p * v_dim1]);
1924 ctmp.r = z__1.r, ctmp.i = z__1.i;
1925 i__3 = q + p * v_dim1;
1926 d_cnjg(&z__1, &v[p + q * v_dim1]);
1927 v[i__3].r = z__1.r, v[i__3].i = z__1.i;
1928 i__3 = p + q * v_dim1;
1929 v[i__3].r = ctmp.r, v[i__3].i = ctmp.i;
1936 for (p = 1; p <= i__1; ++p) {
1938 for (q = nr + 1; q <= i__2; ++q) {
1939 i__3 = p + q * v_dim1;
1940 d_cnjg(&z__1, &v[q + p * v_dim1]);
1941 v[i__3].r = z__1.r, v[i__3].i = z__1.i;
1947 zlapmt_(&c_false, &nr, n, &v[v_offset], ldv, &iwork[1]);
1950 for (p = 1; p <= i__1; ++p) {
1951 i__2 = p + p * u_dim1;
1952 d_cnjg(&z__1, &u[p + p * u_dim1]);
1953 u[i__2].r = z__1.r, u[i__2].i = z__1.i;
1955 for (q = p + 1; q <= i__2; ++q) {
1956 d_cnjg(&z__1, &u[q + p * u_dim1]);
1957 ctmp.r = z__1.r, ctmp.i = z__1.i;
1958 i__3 = q + p * u_dim1;
1959 d_cnjg(&z__1, &u[p + q * u_dim1]);
1960 u[i__3].r = z__1.r, u[i__3].i = z__1.i;
1961 i__3 = p + q * u_dim1;
1962 u[i__3].r = ctmp.r, u[i__3].i = ctmp.i;
1968 if (nr < *m && ! wntuf) {
1970 zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &u[nr + 1 + u_dim1]
1974 zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &u[(nr + 1) *
1978 zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[nr + 1 + (
1979 nr + 1) * u_dim1], ldu);
1984 /* vectors of R**H */
1985 /* [[The optimal ratio N/NR for using QRF instead of padding */
1986 /* with zeros. Here hard coded to 2; it must be at least */
1987 /* two due to work space constraints.]] */
1988 /* OPTRATIO = ILAENV(6, 'ZGESVD', 'S' // 'O', NR,N,0,0) */
1989 /* OPTRATIO = MAX( OPTRATIO, 2 ) */
1991 if (optratio * nr > *n) {
1993 for (p = 1; p <= i__1; ++p) {
1995 for (q = p; q <= i__2; ++q) {
1996 i__3 = q + p * v_dim1;
1997 d_cnjg(&z__1, &a[p + q * a_dim1]);
1998 v[i__3].r = z__1.r, v[i__3].i = z__1.i;
2006 zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 <<
2011 zlaset_("A", n, &i__1, &c_b1, &c_b1, &v[(nr + 1) * v_dim1
2013 i__1 = *lcwork - *n;
2014 zgesvd_("O", "A", n, n, &v[v_offset], ldv, &s[1], &v[
2015 v_offset], ldv, &u[u_offset], ldu, &cwork[*n + 1],
2016 &i__1, &rwork[1], info);
2019 for (p = 1; p <= i__1; ++p) {
2020 i__2 = p + p * v_dim1;
2021 d_cnjg(&z__1, &v[p + p * v_dim1]);
2022 v[i__2].r = z__1.r, v[i__2].i = z__1.i;
2024 for (q = p + 1; q <= i__2; ++q) {
2025 d_cnjg(&z__1, &v[q + p * v_dim1]);
2026 ctmp.r = z__1.r, ctmp.i = z__1.i;
2027 i__3 = q + p * v_dim1;
2028 d_cnjg(&z__1, &v[p + q * v_dim1]);
2029 v[i__3].r = z__1.r, v[i__3].i = z__1.i;
2030 i__3 = p + q * v_dim1;
2031 v[i__3].r = ctmp.r, v[i__3].i = ctmp.i;
2036 zlapmt_(&c_false, n, n, &v[v_offset], ldv, &iwork[1]);
2037 /* (M x N1), i.e. (M x N) or (M x M). */
2040 for (p = 1; p <= i__1; ++p) {
2041 i__2 = p + p * u_dim1;
2042 d_cnjg(&z__1, &u[p + p * u_dim1]);
2043 u[i__2].r = z__1.r, u[i__2].i = z__1.i;
2045 for (q = p + 1; q <= i__2; ++q) {
2046 d_cnjg(&z__1, &u[q + p * u_dim1]);
2047 ctmp.r = z__1.r, ctmp.i = z__1.i;
2048 i__3 = q + p * u_dim1;
2049 d_cnjg(&z__1, &u[p + q * u_dim1]);
2050 u[i__3].r = z__1.r, u[i__3].i = z__1.i;
2051 i__3 = p + q * u_dim1;
2052 u[i__3].r = ctmp.r, u[i__3].i = ctmp.i;
2058 if (*n < *m && ! wntuf) {
2060 zlaset_("A", &i__1, n, &c_b1, &c_b1, &u[*n + 1 +
2064 zlaset_("A", n, &i__1, &c_b1, &c_b1, &u[(*n + 1) *
2068 zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[*n +
2069 1 + (*n + 1) * u_dim1], ldu);
2073 /* singular vectors of R */
2075 for (p = 1; p <= i__1; ++p) {
2077 for (q = p; q <= i__2; ++q) {
2078 i__3 = q + (nr + p) * u_dim1;
2079 d_cnjg(&z__1, &a[p + q * a_dim1]);
2080 u[i__3].r = z__1.r, u[i__3].i = z__1.i;
2088 zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &u[(nr + 2) *
2091 i__1 = *lcwork - *n - nr;
2092 zgeqrf_(n, &nr, &u[(nr + 1) * u_dim1 + 1], ldu, &cwork[*n
2093 + 1], &cwork[*n + nr + 1], &i__1, &ierr);
2095 for (p = 1; p <= i__1; ++p) {
2097 for (q = 1; q <= i__2; ++q) {
2098 i__3 = q + p * v_dim1;
2099 d_cnjg(&z__1, &u[p + (nr + q) * u_dim1]);
2100 v[i__3].r = z__1.r, v[i__3].i = z__1.i;
2107 zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1)
2109 i__1 = *lcwork - *n - nr;
2110 zgesvd_("S", "O", &nr, &nr, &v[v_offset], ldv, &s[1], &u[
2111 u_offset], ldu, &v[v_offset], ldv, &cwork[*n + nr
2112 + 1], &i__1, &rwork[1], info);
2114 zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &v[nr + 1 + v_dim1]
2117 zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &v[(nr + 1) *
2121 zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &v[nr + 1 + (nr
2122 + 1) * v_dim1], ldv);
2123 i__1 = *lcwork - *n - nr;
2124 zunmqr_("R", "C", n, n, &nr, &u[(nr + 1) * u_dim1 + 1],
2125 ldu, &cwork[*n + 1], &v[v_offset], ldv, &cwork[*n
2126 + nr + 1], &i__1, &ierr);
2127 zlapmt_(&c_false, n, n, &v[v_offset], ldv, &iwork[1]);
2128 /* (M x NR) or (M x N) or (M x M). */
2129 if (nr < *m && ! wntuf) {
2131 zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &u[nr + 1 +
2135 zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &u[(nr + 1)
2136 * u_dim1 + 1], ldu);
2139 zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[nr +
2140 1 + (nr + 1) * u_dim1], ldu);
2149 if (wntvr || nr == *n) {
2150 zlacpy_("U", &nr, n, &a[a_offset], lda, &v[v_offset], ldv);
2154 zlaset_("L", &i__1, &i__2, &c_b1, &c_b1, &v[v_dim1 + 2],
2157 /* singular vectors of R stored in [U](1:NR,1:NR) */
2158 i__1 = *lcwork - *n;
2159 zgesvd_("S", "O", &nr, n, &v[v_offset], ldv, &s[1], &u[
2160 u_offset], ldu, &v[v_offset], ldv, &cwork[*n + 1], &
2161 i__1, &rwork[1], info);
2162 zlapmt_(&c_false, &nr, n, &v[v_offset], ldv, &iwork[1]);
2163 /* (M x NR) or (M x N) or (M x M). */
2164 if (nr < *m && ! wntuf) {
2166 zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &u[nr + 1 + u_dim1]
2170 zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &u[(nr + 1) *
2174 zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[nr + 1 + (
2175 nr + 1) * u_dim1], ldu);
2180 /* is then N1 (N or M) */
2181 /* [[The optimal ratio N/NR for using LQ instead of padding */
2182 /* with zeros. Here hard coded to 2; it must be at least */
2183 /* two due to work space constraints.]] */
2184 /* OPTRATIO = ILAENV(6, 'ZGESVD', 'S' // 'O', NR,N,0,0) */
2185 /* OPTRATIO = MAX( OPTRATIO, 2 ) */
2187 if (optratio * nr > *n) {
2188 zlacpy_("U", &nr, n, &a[a_offset], lda, &v[v_offset], ldv);
2192 zlaset_("L", &i__1, &i__2, &c_b1, &c_b1, &v[v_dim1 +
2195 /* singular vectors of R stored in [U](1:NR,1:NR) */
2197 zlaset_("A", &i__1, n, &c_b1, &c_b1, &v[nr + 1 + v_dim1],
2199 i__1 = *lcwork - *n;
2200 zgesvd_("S", "O", n, n, &v[v_offset], ldv, &s[1], &u[
2201 u_offset], ldu, &v[v_offset], ldv, &cwork[*n + 1],
2202 &i__1, &rwork[1], info);
2203 zlapmt_(&c_false, n, n, &v[v_offset], ldv, &iwork[1]);
2204 /* singular vectors of A. The leading N left singular vectors */
2205 /* are in [U](1:N,1:N) */
2206 /* (M x N1), i.e. (M x N) or (M x M). */
2207 if (*n < *m && ! wntuf) {
2209 zlaset_("A", &i__1, n, &c_b1, &c_b1, &u[*n + 1 +
2213 zlaset_("A", n, &i__1, &c_b1, &c_b1, &u[(*n + 1) *
2217 zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[*n +
2218 1 + (*n + 1) * u_dim1], ldu);
2222 zlacpy_("U", &nr, n, &a[a_offset], lda, &u[nr + 1 +
2227 zlaset_("L", &i__1, &i__2, &c_b1, &c_b1, &u[nr + 2 +
2230 i__1 = *lcwork - *n - nr;
2231 zgelqf_(&nr, n, &u[nr + 1 + u_dim1], ldu, &cwork[*n + 1],
2232 &cwork[*n + nr + 1], &i__1, &ierr);
2233 zlacpy_("L", &nr, &nr, &u[nr + 1 + u_dim1], ldu, &v[
2238 zlaset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 <<
2241 i__1 = *lcwork - *n - nr;
2242 zgesvd_("S", "O", &nr, &nr, &v[v_offset], ldv, &s[1], &u[
2243 u_offset], ldu, &v[v_offset], ldv, &cwork[*n + nr
2244 + 1], &i__1, &rwork[1], info);
2246 zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &v[nr + 1 + v_dim1]
2249 zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &v[(nr + 1) *
2253 zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &v[nr + 1 + (nr
2254 + 1) * v_dim1], ldv);
2255 i__1 = *lcwork - *n - nr;
2256 zunmlq_("R", "N", n, n, &nr, &u[nr + 1 + u_dim1], ldu, &
2257 cwork[*n + 1], &v[v_offset], ldv, &cwork[*n + nr
2258 + 1], &i__1, &ierr);
2259 zlapmt_(&c_false, n, n, &v[v_offset], ldv, &iwork[1]);
2260 /* (M x NR) or (M x N) or (M x M). */
2261 if (nr < *m && ! wntuf) {
2263 zlaset_("A", &i__1, &nr, &c_b1, &c_b1, &u[nr + 1 +
2267 zlaset_("A", &nr, &i__1, &c_b1, &c_b1, &u[(nr + 1)
2268 * u_dim1 + 1], ldu);
2271 zlaset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[nr +
2272 1 + (nr + 1) * u_dim1], ldu);
2279 /* The Q matrix from the first QRF is built into the left singular */
2280 /* vectors matrix U. */
2283 i__1 = *lcwork - *n;
2284 zunmqr_("L", "N", m, &n1, n, &a[a_offset], lda, &cwork[1], &u[
2285 u_offset], ldu, &cwork[*n + 1], &i__1, &ierr);
2287 if (rowprm && ! wntuf) {
2289 zlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[*n + 1], &
2293 /* ... end of the "full SVD" branch */
2296 /* Check whether some singular values are returned as zeros, e.g. */
2297 /* due to underflow, and update the numerical rank. */
2299 for (q = p; q >= 1; --q) {
2308 /* singular values are set to zero. */
2311 dlaset_("G", &i__1, &c__1, &c_b74, &c_b74, &s[nr + 1], n);
2315 d__1 = sqrt((doublereal) (*m));
2316 dlascl_("G", &c__0, &c__0, &c_b87, &d__1, &nr, &c__1, &s[1], n, &ierr);
2321 rwork[2] = (doublereal) (p - nr);
2322 /* exact zeros in ZGESVD() applied to the (possibly truncated) */
2323 /* full row rank triangular (trapezoidal) factor of A. */
2328 /* End of ZGESVDQ */