14 typedef long long BLASLONG;
15 typedef unsigned long long BLASULONG;
17 typedef long BLASLONG;
18 typedef unsigned long BLASULONG;
22 typedef BLASLONG blasint;
24 #define blasabs(x) llabs(x)
26 #define blasabs(x) labs(x)
30 #define blasabs(x) abs(x)
33 typedef blasint integer;
35 typedef unsigned int uinteger;
36 typedef char *address;
37 typedef short int shortint;
39 typedef double doublereal;
40 typedef struct { real r, i; } complex;
41 typedef struct { doublereal r, i; } doublecomplex;
43 static inline _Fcomplex Cf(complex *z) {_Fcomplex zz={z->r , z->i}; return zz;}
44 static inline _Dcomplex Cd(doublecomplex *z) {_Dcomplex zz={z->r , z->i};return zz;}
45 static inline _Fcomplex * _pCf(complex *z) {return (_Fcomplex*)z;}
46 static inline _Dcomplex * _pCd(doublecomplex *z) {return (_Dcomplex*)z;}
48 static inline _Complex float Cf(complex *z) {return z->r + z->i*_Complex_I;}
49 static inline _Complex double Cd(doublecomplex *z) {return z->r + z->i*_Complex_I;}
50 static inline _Complex float * _pCf(complex *z) {return (_Complex float*)z;}
51 static inline _Complex double * _pCd(doublecomplex *z) {return (_Complex double*)z;}
53 #define pCf(z) (*_pCf(z))
54 #define pCd(z) (*_pCd(z))
56 typedef short int shortlogical;
57 typedef char logical1;
58 typedef char integer1;
63 /* Extern is for use with -E */
74 /*external read, write*/
83 /*internal read, write*/
113 /*rewind, backspace, endfile*/
125 ftnint *inex; /*parameters in standard's order*/
151 union Multitype { /* for multiple entry points */
162 typedef union Multitype Multitype;
164 struct Vardesc { /* for Namelist */
170 typedef struct Vardesc Vardesc;
177 typedef struct Namelist Namelist;
179 #define abs(x) ((x) >= 0 ? (x) : -(x))
180 #define dabs(x) (fabs(x))
181 #define f2cmin(a,b) ((a) <= (b) ? (a) : (b))
182 #define f2cmax(a,b) ((a) >= (b) ? (a) : (b))
183 #define dmin(a,b) (f2cmin(a,b))
184 #define dmax(a,b) (f2cmax(a,b))
185 #define bit_test(a,b) ((a) >> (b) & 1)
186 #define bit_clear(a,b) ((a) & ~((uinteger)1 << (b)))
187 #define bit_set(a,b) ((a) | ((uinteger)1 << (b)))
189 #define abort_() { sig_die("Fortran abort routine called", 1); }
190 #define c_abs(z) (cabsf(Cf(z)))
/* R = cos(Z) for single-precision complex.  Use the float variant ccosf,
   consistent with c_exp/c_log/c_sin/c_sqrt below; the previous ccos call
   computed in double (and is a type error on the MSVC _Fcomplex build). */
#define c_cos(R,Z) { pCf(R)=ccosf(Cf(Z)); }
/* Complex division c = a / b (Fortran CDIV/ZDIV semantics) for the
   MSVC _Fcomplex/_Dcomplex build.  The previous version had three bugs:
   (1) z_div referenced a nonexistent df() instead of Cd();
   (2) it assigned through Cf(c)/Cd(c), which return by value and are not
       lvalues — results must be stored through pCf(c)/pCd(c);
   (3) it divided the real and imaginary parts independently, which is not
       complex division.
   Operands are read into temporaries first, so c may alias a or b. */
#define c_div(c, a, b) { \
	float _ar = Cf(a)._Val[0], _ai = Cf(a)._Val[1]; \
	float _br = Cf(b)._Val[0], _bi = Cf(b)._Val[1]; \
	float _d = _br*_br + _bi*_bi; \
	pCf(c)._Val[0] = (_ar*_br + _ai*_bi)/_d; \
	pCf(c)._Val[1] = (_ai*_br - _ar*_bi)/_d; }
#define z_div(c, a, b) { \
	double _ar = Cd(a)._Val[0], _ai = Cd(a)._Val[1]; \
	double _br = Cd(b)._Val[0], _bi = Cd(b)._Val[1]; \
	double _d = _br*_br + _bi*_bi; \
	pCd(c)._Val[0] = (_ar*_br + _ai*_bi)/_d; \
	pCd(c)._Val[1] = (_ai*_br - _ar*_bi)/_d; }
196 #define c_div(c, a, b) {pCf(c) = Cf(a)/Cf(b);}
197 #define z_div(c, a, b) {pCd(c) = Cd(a)/Cd(b);}
199 #define c_exp(R, Z) {pCf(R) = cexpf(Cf(Z));}
200 #define c_log(R, Z) {pCf(R) = clogf(Cf(Z));}
201 #define c_sin(R, Z) {pCf(R) = csinf(Cf(Z));}
202 //#define c_sqrt(R, Z) {*(R) = csqrtf(Cf(Z));}
203 #define c_sqrt(R, Z) {pCf(R) = csqrtf(Cf(Z));}
204 #define d_abs(x) (fabs(*(x)))
205 #define d_acos(x) (acos(*(x)))
206 #define d_asin(x) (asin(*(x)))
207 #define d_atan(x) (atan(*(x)))
208 #define d_atn2(x, y) (atan2(*(x),*(y)))
209 #define d_cnjg(R, Z) { pCd(R) = conj(Cd(Z)); }
210 #define r_cnjg(R, Z) { pCf(R) = conjf(Cf(Z)); }
211 #define d_cos(x) (cos(*(x)))
212 #define d_cosh(x) (cosh(*(x)))
213 #define d_dim(__a, __b) ( *(__a) > *(__b) ? *(__a) - *(__b) : 0.0 )
214 #define d_exp(x) (exp(*(x)))
215 #define d_imag(z) (cimag(Cd(z)))
216 #define r_imag(z) (cimagf(Cf(z)))
217 #define d_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
218 #define r_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
219 #define d_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
220 #define r_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
221 #define d_log(x) (log(*(x)))
222 #define d_mod(x, y) (fmod(*(x), *(y)))
223 #define u_nint(__x) ((__x)>=0 ? floor((__x) + .5) : -floor(.5 - (__x)))
224 #define d_nint(x) u_nint(*(x))
225 #define u_sign(__a,__b) ((__b) >= 0 ? ((__a) >= 0 ? (__a) : -(__a)) : -((__a) >= 0 ? (__a) : -(__a)))
226 #define d_sign(a,b) u_sign(*(a),*(b))
227 #define r_sign(a,b) u_sign(*(a),*(b))
228 #define d_sin(x) (sin(*(x)))
229 #define d_sinh(x) (sinh(*(x)))
230 #define d_sqrt(x) (sqrt(*(x)))
231 #define d_tan(x) (tan(*(x)))
232 #define d_tanh(x) (tanh(*(x)))
233 #define i_abs(x) abs(*(x))
234 #define i_dnnt(x) ((integer)u_nint(*(x)))
235 #define i_len(s, n) (n)
236 #define i_nint(x) ((integer)u_nint(*(x)))
237 #define i_sign(a,b) ((integer)u_sign((integer)*(a),(integer)*(b)))
238 #define pow_dd(ap, bp) ( pow(*(ap), *(bp)))
239 #define pow_si(B,E) spow_ui(*(B),*(E))
240 #define pow_ri(B,E) spow_ui(*(B),*(E))
241 #define pow_di(B,E) dpow_ui(*(B),*(E))
242 #define pow_zi(p, a, b) {pCd(p) = zpow_ui(Cd(a), *(b));}
243 #define pow_ci(p, a, b) {pCf(p) = cpow_ui(Cf(a), *(b));}
/* R = A ** B for complex*16 operands (Fortran Z**Z).  B is a doublecomplex*
   and must be converted with Cd() exactly like A; the previous *(B) passed
   the raw struct where cpow expects a C complex value. */
#define pow_zz(R,A,B) {pCd(R) = cpow(Cd(A),Cd(B));}
245 #define s_cat(lpp, rpp, rnp, np, llp) { ftnlen i, nc, ll; char *f__rp, *lp; ll = (llp); lp = (lpp); for(i=0; i < (int)*(np); ++i) { nc = ll; if((rnp)[i] < nc) nc = (rnp)[i]; ll -= nc; f__rp = (rpp)[i]; while(--nc >= 0) *lp++ = *(f__rp)++; } while(--ll >= 0) *lp++ = ' '; }
246 #define s_cmp(a,b,c,d) ((integer)strncmp((a),(b),f2cmin((c),(d))))
/* Fortran character assignment A = B, where A has length C and B length D.
   Copy up to min(C,D) characters (stopping at NUL, as the original did),
   then blank-pad A to its full length C — f2c's libF77 s_copy pads with
   blanks, and omitting the padding leaves stale bytes in A when B is the
   shorter string. */
#define s_copy(A,B,C,D) { int __i,__m; for (__i=0, __m=f2cmin((C),(D)); __i<__m && (B)[__i] != 0; ++__i) (A)[__i] = (B)[__i]; for (; __i < (C); ++__i) (A)[__i] = ' '; }
248 #define sig_die(s, kill) { exit(1); }
249 #define s_stop(s, n) {exit(0);}
250 static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n";
251 #define z_abs(z) (cabs(Cd(z)))
252 #define z_exp(R, Z) {pCd(R) = cexp(Cd(Z));}
253 #define z_sqrt(R, Z) {pCd(R) = csqrt(Cd(Z));}
254 #define myexit_() break;
255 #define mycycle() continue;
256 #define myceiling(w) {ceil(w)}
257 #define myhuge(w) {HUGE_VAL}
258 //#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);}
259 #define mymaxloc(w,s,e,n) {dmaxloc_(w,*(s),*(e),n)}
261 /* procedure parameter types for -A and -C++ */
263 #define F2C_proc_par_types 1
265 typedef logical (*L_fp)(...);
267 typedef logical (*L_fp)();
270 static float spow_ui(float x, integer n) {
271 float pow=1.0; unsigned long int u;
273 if(n < 0) n = -n, x = 1/x;
282 static double dpow_ui(double x, integer n) {
283 double pow=1.0; unsigned long int u;
285 if(n < 0) n = -n, x = 1/x;
295 static _Fcomplex cpow_ui(complex x, integer n) {
296 complex pow={1.0,0.0}; unsigned long int u;
298 if(n < 0) n = -n, x.r = 1/x.r, x.i=1/x.i;
300 if(u & 01) pow.r *= x.r, pow.i *= x.i;
301 if(u >>= 1) x.r *= x.r, x.i *= x.i;
305 _Fcomplex p={pow.r, pow.i};
309 static _Complex float cpow_ui(_Complex float x, integer n) {
310 _Complex float pow=1.0; unsigned long int u;
312 if(n < 0) n = -n, x = 1/x;
323 static _Dcomplex zpow_ui(_Dcomplex x, integer n) {
324 _Dcomplex pow={1.0,0.0}; unsigned long int u;
326 if(n < 0) n = -n, x._Val[0] = 1/x._Val[0], x._Val[1] =1/x._Val[1];
328 if(u & 01) pow._Val[0] *= x._Val[0], pow._Val[1] *= x._Val[1];
329 if(u >>= 1) x._Val[0] *= x._Val[0], x._Val[1] *= x._Val[1];
333 _Dcomplex p = {pow._Val[0], pow._Val[1]};
337 static _Complex double zpow_ui(_Complex double x, integer n) {
338 _Complex double pow=1.0; unsigned long int u;
340 if(n < 0) n = -n, x = 1/x;
350 static integer pow_ii(integer x, integer n) {
351 integer pow; unsigned long int u;
353 if (n == 0 || x == 1) pow = 1;
354 else if (x != -1) pow = x == 0 ? 1/x : 0;
357 if ((n > 0) || !(n == 0 || x == 1 || x != -1)) {
367 static integer dmaxloc_(double *w, integer s, integer e, integer *n)
369 double m; integer i, mi;
370 for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
371 if (w[i-1]>m) mi=i ,m=w[i-1];
374 static integer smaxloc_(float *w, integer s, integer e, integer *n)
376 float m; integer i, mi;
377 for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
378 if (w[i-1]>m) mi=i ,m=w[i-1];
381 static inline void cdotc_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
382 integer n = *n_, incx = *incx_, incy = *incy_, i;
384 _Fcomplex zdotc = {0.0, 0.0};
385 if (incx == 1 && incy == 1) {
386 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
387 zdotc._Val[0] += conjf(Cf(&x[i]))._Val[0] * Cf(&y[i])._Val[0];
388 zdotc._Val[1] += conjf(Cf(&x[i]))._Val[1] * Cf(&y[i])._Val[1];
391 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
392 zdotc._Val[0] += conjf(Cf(&x[i*incx]))._Val[0] * Cf(&y[i*incy])._Val[0];
393 zdotc._Val[1] += conjf(Cf(&x[i*incx]))._Val[1] * Cf(&y[i*incy])._Val[1];
399 _Complex float zdotc = 0.0;
400 if (incx == 1 && incy == 1) {
401 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
402 zdotc += conjf(Cf(&x[i])) * Cf(&y[i]);
405 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
406 zdotc += conjf(Cf(&x[i*incx])) * Cf(&y[i*incy]);
412 static inline void zdotc_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
413 integer n = *n_, incx = *incx_, incy = *incy_, i;
415 _Dcomplex zdotc = {0.0, 0.0};
416 if (incx == 1 && incy == 1) {
417 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
418 zdotc._Val[0] += conj(Cd(&x[i]))._Val[0] * Cd(&y[i])._Val[0];
419 zdotc._Val[1] += conj(Cd(&x[i]))._Val[1] * Cd(&y[i])._Val[1];
422 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
423 zdotc._Val[0] += conj(Cd(&x[i*incx]))._Val[0] * Cd(&y[i*incy])._Val[0];
424 zdotc._Val[1] += conj(Cd(&x[i*incx]))._Val[1] * Cd(&y[i*incy])._Val[1];
430 _Complex double zdotc = 0.0;
431 if (incx == 1 && incy == 1) {
432 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
433 zdotc += conj(Cd(&x[i])) * Cd(&y[i]);
436 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
437 zdotc += conj(Cd(&x[i*incx])) * Cd(&y[i*incy]);
443 static inline void cdotu_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
444 integer n = *n_, incx = *incx_, incy = *incy_, i;
446 _Fcomplex zdotc = {0.0, 0.0};
447 if (incx == 1 && incy == 1) {
448 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
449 zdotc._Val[0] += Cf(&x[i])._Val[0] * Cf(&y[i])._Val[0];
450 zdotc._Val[1] += Cf(&x[i])._Val[1] * Cf(&y[i])._Val[1];
453 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
454 zdotc._Val[0] += Cf(&x[i*incx])._Val[0] * Cf(&y[i*incy])._Val[0];
455 zdotc._Val[1] += Cf(&x[i*incx])._Val[1] * Cf(&y[i*incy])._Val[1];
461 _Complex float zdotc = 0.0;
462 if (incx == 1 && incy == 1) {
463 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
464 zdotc += Cf(&x[i]) * Cf(&y[i]);
467 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
468 zdotc += Cf(&x[i*incx]) * Cf(&y[i*incy]);
474 static inline void zdotu_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
475 integer n = *n_, incx = *incx_, incy = *incy_, i;
477 _Dcomplex zdotc = {0.0, 0.0};
478 if (incx == 1 && incy == 1) {
479 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
480 zdotc._Val[0] += Cd(&x[i])._Val[0] * Cd(&y[i])._Val[0];
481 zdotc._Val[1] += Cd(&x[i])._Val[1] * Cd(&y[i])._Val[1];
484 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
485 zdotc._Val[0] += Cd(&x[i*incx])._Val[0] * Cd(&y[i*incy])._Val[0];
486 zdotc._Val[1] += Cd(&x[i*incx])._Val[1] * Cd(&y[i*incy])._Val[1];
492 _Complex double zdotc = 0.0;
493 if (incx == 1 && incy == 1) {
494 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
495 zdotc += Cd(&x[i]) * Cd(&y[i]);
498 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
499 zdotc += Cd(&x[i*incx]) * Cd(&y[i*incy]);
505 /* -- translated by f2c (version 20000121).
506 You must link the resulting object file with the libraries:
507 -lf2c -lm (in that order)
513 /* Table of constant values */
515 static complex c_b1 = {0.f,0.f};
516 static complex c_b2 = {1.f,0.f};
517 static integer c_n1 = -1;
518 static integer c__1 = 1;
519 static real c_b74 = 0.f;
520 static integer c__0 = 0;
521 static real c_b87 = 1.f;
522 static logical c_false = FALSE_;
524 /* > \brief <b> CGESVDQ computes the singular value decomposition (SVD) with a QR-Preconditioned QR SVD Method
525 for GE matrices</b> */
527 /* =========== DOCUMENTATION =========== */
529 /* Online html documentation available at */
530 /* http://www.netlib.org/lapack/explore-html/ */
533 /* > Download CGESVDQ + dependencies */
534 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/cgesvdq
537 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/cgesvdq
540 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/cgesvdq
548 /* SUBROUTINE CGESVDQ( JOBA, JOBP, JOBR, JOBU, JOBV, M, N, A, LDA, */
549 /* S, U, LDU, V, LDV, NUMRANK, IWORK, LIWORK, */
550 /* CWORK, LCWORK, RWORK, LRWORK, INFO ) */
553 /* CHARACTER JOBA, JOBP, JOBR, JOBU, JOBV */
554 /* INTEGER M, N, LDA, LDU, LDV, NUMRANK, LIWORK, LCWORK, LRWORK, */
556 /* COMPLEX A( LDA, * ), U( LDU, * ), V( LDV, * ), CWORK( * ) */
557 /* REAL S( * ), RWORK( * ) */
558 /* INTEGER IWORK( * ) */
561 /* > \par Purpose: */
566 /* > CGESVDQ computes the singular value decomposition (SVD) of a complex */
567 /* > M-by-N matrix A, where M >= N. The SVD of A is written as */
568 /* > [++] [xx] [x0] [xx] */
569 /* > A = U * SIGMA * V^*, [++] = [xx] * [ox] * [xx] */
571 /* > where SIGMA is an N-by-N diagonal matrix, U is an M-by-N orthonormal */
572 /* > matrix, and V is an N-by-N unitary matrix. The diagonal elements */
573 /* > of SIGMA are the singular values of A. The columns of U and V are the */
574 /* > left and the right singular vectors of A, respectively. */
580 /* > \param[in] JOBA */
582 /* > JOBA is CHARACTER*1 */
583 /* > Specifies the level of accuracy in the computed SVD */
584 /* > = 'A' The requested accuracy corresponds to having the backward */
585 /* > error bounded by || delta A ||_F <= f(m,n) * EPS * || A ||_F, */
586 /* > where EPS = SLAMCH('Epsilon'). This authorises CGESVDQ to */
587 /* > truncate the computed triangular factor in a rank revealing */
588 /* > QR factorization whenever the truncated part is below the */
589 /* > threshold of the order of EPS * ||A||_F. This is aggressive */
590 /* > truncation level. */
591 /* > = 'M' Similarly as with 'A', but the truncation is more gentle: it */
592 /* > is allowed only when there is a drop on the diagonal of the */
593 /* > triangular factor in the QR factorization. This is medium */
594 /* > truncation level. */
595 /* > = 'H' High accuracy requested. No numerical rank determination based */
596 /* > on the rank revealing QR factorization is attempted. */
597 /* > = 'E' Same as 'H', and in addition the condition number of column */
598 /* > scaled A is estimated and returned in RWORK(1). */
599 /* > N^(-1/4)*RWORK(1) <= ||pinv(A_scaled)||_2 <= N^(1/4)*RWORK(1) */
602 /* > \param[in] JOBP */
604 /* > JOBP is CHARACTER*1 */
605 /* > = 'P' The rows of A are ordered in decreasing order with respect to */
606 /* > ||A(i,:)||_\infty. This enhances numerical accuracy at the cost */
607 /* > of extra data movement. Recommended for numerical robustness. */
608 /* > = 'N' No row pivoting. */
611 /* > \param[in] JOBR */
613 /* > JOBR is CHARACTER*1 */
614 /* > = 'T' After the initial pivoted QR factorization, CGESVD is applied to */
615 /* > the adjoint R**H of the computed triangular factor R. This involves */
616 /* > some extra data movement (matrix transpositions). Useful for */
617 /* > experiments, research and development. */
618 /* > = 'N' The triangular factor R is given as input to CGESVD. This may be */
619 /* > preferred as it involves less data movement. */
622 /* > \param[in] JOBU */
624 /* > JOBU is CHARACTER*1 */
625 /* > = 'A' All M left singular vectors are computed and returned in the */
626 /* > matrix U. See the description of U. */
627 /* > = 'S' or 'U' N = f2cmin(M,N) left singular vectors are computed and returned */
628 /* > in the matrix U. See the description of U. */
629 /* > = 'R' Numerical rank NUMRANK is determined and only NUMRANK left singular */
630 /* > vectors are computed and returned in the matrix U. */
631 /* > = 'F' The N left singular vectors are returned in factored form as the */
632 /* > product of the Q factor from the initial QR factorization and the */
633 /* > N left singular vectors of (R**H , 0)**H. If row pivoting is used, */
634 /* > then the necessary information on the row pivoting is stored in */
635 /* > IWORK(N+1:N+M-1). */
636 /* > = 'N' The left singular vectors are not computed. */
639 /* > \param[in] JOBV */
641 /* > JOBV is CHARACTER*1 */
642 /* > = 'A', 'V' All N right singular vectors are computed and returned in */
643 /* > the matrix V. */
644 /* > = 'R' Numerical rank NUMRANK is determined and only NUMRANK right singular */
645 /* > vectors are computed and returned in the matrix V. This option is */
646 /* > allowed only if JOBU = 'R' or JOBU = 'N'; otherwise it is illegal. */
647 /* > = 'N' The right singular vectors are not computed. */
653 /* > The number of rows of the input matrix A. M >= 0. */
659 /* > The number of columns of the input matrix A. M >= N >= 0. */
662 /* > \param[in,out] A */
664 /* > A is COMPLEX array of dimensions LDA x N */
665 /* > On entry, the input matrix A. */
666 /* > On exit, if JOBU .NE. 'N' or JOBV .NE. 'N', the lower triangle of A contains */
667 /* > the Householder vectors as stored by CGEQP3. If JOBU = 'F', these Householder */
668 /* > vectors together with CWORK(1:N) can be used to restore the Q factors from */
669 /* > the initial pivoted QR factorization of A. See the description of U. */
672 /* > \param[in] LDA */
674 /* > LDA is INTEGER. */
675 /* > The leading dimension of the array A. LDA >= f2cmax(1,M). */
678 /* > \param[out] S */
680 /* > S is REAL array of dimension N. */
681 /* > The singular values of A, ordered so that S(i) >= S(i+1). */
684 /* > \param[out] U */
686 /* > U is COMPLEX array, dimension */
687 /* > LDU x M if JOBU = 'A'; see the description of LDU. In this case, */
688 /* > on exit, U contains the M left singular vectors. */
689 /* > LDU x N if JOBU = 'S', 'U', 'R' ; see the description of LDU. In this */
690 /* > case, U contains the leading N or the leading NUMRANK left singular vectors. */
691 /* > LDU x N if JOBU = 'F' ; see the description of LDU. In this case U */
692 /* > contains N x N unitary matrix that can be used to form the left */
693 /* > singular vectors. */
694 /* > If JOBU = 'N', U is not referenced. */
697 /* > \param[in] LDU */
699 /* > LDU is INTEGER. */
700 /* > The leading dimension of the array U. */
701 /* > If JOBU = 'A', 'S', 'U', 'R', LDU >= f2cmax(1,M). */
702 /* > If JOBU = 'F', LDU >= f2cmax(1,N). */
703 /* > Otherwise, LDU >= 1. */
706 /* > \param[out] V */
708 /* > V is COMPLEX array, dimension */
709 /* > LDV x N if JOBV = 'A', 'V', 'R' or if JOBA = 'E' . */
710 /* > If JOBV = 'A', or 'V', V contains the N-by-N unitary matrix V**H; */
711 /* > If JOBV = 'R', V contains the first NUMRANK rows of V**H (the right */
712 /* > singular vectors, stored rowwise, of the NUMRANK largest singular values). */
713 /* > If JOBV = 'N' and JOBA = 'E', V is used as a workspace. */
714 /* > If JOBV = 'N', and JOBA.NE.'E', V is not referenced. */
717 /* > \param[in] LDV */
719 /* > LDV is INTEGER */
720 /* > The leading dimension of the array V. */
721 /* > If JOBV = 'A', 'V', 'R', or JOBA = 'E', LDV >= f2cmax(1,N). */
722 /* > Otherwise, LDV >= 1. */
725 /* > \param[out] NUMRANK */
727 /* > NUMRANK is INTEGER */
728 /* > NUMRANK is the numerical rank first determined after the rank */
729 /* > revealing QR factorization, following the strategy specified by the */
730 /* > value of JOBA. If JOBV = 'R' and JOBU = 'R', only NUMRANK */
731 /* > leading singular values and vectors are then requested in the call */
732 /* > of CGESVD. The final value of NUMRANK might be further reduced if */
733 /* > some singular values are computed as zeros. */
736 /* > \param[out] IWORK */
738 /* > IWORK is INTEGER array, dimension (f2cmax(1, LIWORK)). */
739 /* > On exit, IWORK(1:N) contains column pivoting permutation of the */
740 /* > rank revealing QR factorization. */
741 /* > If JOBP = 'P', IWORK(N+1:N+M-1) contains the indices of the sequence */
742 /* > of row swaps used in row pivoting. These can be used to restore the */
743 /* > left singular vectors in the case JOBU = 'F'. */
745 /* > If LIWORK, LCWORK, or LRWORK = -1, then on exit, if INFO = 0, */
746 /* > LIWORK(1) returns the minimal LIWORK. */
749 /* > \param[in] LIWORK */
751 /* > LIWORK is INTEGER */
752 /* > The dimension of the array IWORK. */
753 /* > LIWORK >= N + M - 1, if JOBP = 'P'; */
754 /* > LIWORK >= N if JOBP = 'N'. */
756 /* > If LIWORK = -1, then a workspace query is assumed; the routine */
757 /* > only calculates and returns the optimal and minimal sizes */
758 /* > for the CWORK, IWORK, and RWORK arrays, and no error */
759 /* > message related to LCWORK is issued by XERBLA. */
762 /* > \param[out] CWORK */
764 /* > CWORK is COMPLEX array, dimension (f2cmax(2, LCWORK)), used as a workspace. */
765 /* > On exit, if, on entry, LCWORK.NE.-1, CWORK(1:N) contains parameters */
766 /* > needed to recover the Q factor from the QR factorization computed by */
769 /* > If LIWORK, LCWORK, or LRWORK = -1, then on exit, if INFO = 0, */
770 /* > CWORK(1) returns the optimal LCWORK, and */
771 /* > CWORK(2) returns the minimal LCWORK. */
774 /* > \param[in,out] LCWORK */
776 /* > LCWORK is INTEGER */
777 /* > The dimension of the array CWORK. It is determined as follows: */
778 /* > Let LWQP3 = N+1, LWCON = 2*N, and let */
779 /* > LWUNQ = { MAX( N, 1 ), if JOBU = 'R', 'S', or 'U' */
780 /* > { MAX( M, 1 ), if JOBU = 'A' */
781 /* > LWSVD = MAX( 3*N, 1 ) */
782 /* > LWLQF = MAX( N/2, 1 ), LWSVD2 = MAX( 3*(N/2), 1 ), LWUNLQ = MAX( N, 1 ), */
783 /* > LWQRF = MAX( N/2, 1 ), LWUNQ2 = MAX( N, 1 ) */
784 /* > Then the minimal value of LCWORK is: */
785 /* > = MAX( N + LWQP3, LWSVD ) if only the singular values are needed; */
786 /* > = MAX( N + LWQP3, LWCON, LWSVD ) if only the singular values are needed, */
787 /* > and a scaled condition estimate requested; */
789 /* > = N + MAX( LWQP3, LWSVD, LWUNQ ) if the singular values and the left */
790 /* > singular vectors are requested; */
791 /* > = N + MAX( LWQP3, LWCON, LWSVD, LWUNQ ) if the singular values and the left */
792 /* > singular vectors are requested, and also */
793 /* > a scaled condition estimate requested; */
795 /* > = N + MAX( LWQP3, LWSVD ) if the singular values and the right */
796 /* > singular vectors are requested; */
797 /* > = N + MAX( LWQP3, LWCON, LWSVD ) if the singular values and the right */
798 /* > singular vectors are requested, and also */
/* >                     a scaled condition estimate requested; */
801 /* > = N + MAX( LWQP3, LWSVD, LWUNQ ) if the full SVD is requested with JOBV = 'R'; */
802 /* > independent of JOBR; */
803 /* > = N + MAX( LWQP3, LWCON, LWSVD, LWUNQ ) if the full SVD is requested, */
804 /* > JOBV = 'R' and, also a scaled condition */
805 /* > estimate requested; independent of JOBR; */
806 /* > = MAX( N + MAX( LWQP3, LWSVD, LWUNQ ), */
807 /* > N + MAX( LWQP3, N/2+LWLQF, N/2+LWSVD2, N/2+LWUNLQ, LWUNQ) ) if the */
808 /* > full SVD is requested with JOBV = 'A' or 'V', and */
810 /* > = MAX( N + MAX( LWQP3, LWCON, LWSVD, LWUNQ ), */
811 /* > N + MAX( LWQP3, LWCON, N/2+LWLQF, N/2+LWSVD2, N/2+LWUNLQ, LWUNQ ) ) */
812 /* > if the full SVD is requested with JOBV = 'A' or 'V', and */
813 /* > JOBR ='N', and also a scaled condition number estimate */
815 /* > = MAX( N + MAX( LWQP3, LWSVD, LWUNQ ), */
816 /* > N + MAX( LWQP3, N/2+LWQRF, N/2+LWSVD2, N/2+LWUNQ2, LWUNQ ) ) if the */
817 /* > full SVD is requested with JOBV = 'A', 'V', and JOBR ='T' */
818 /* > = MAX( N + MAX( LWQP3, LWCON, LWSVD, LWUNQ ), */
819 /* > N + MAX( LWQP3, LWCON, N/2+LWQRF, N/2+LWSVD2, N/2+LWUNQ2, LWUNQ ) ) */
820 /* > if the full SVD is requested with JOBV = 'A', 'V' and */
821 /* > JOBR ='T', and also a scaled condition number estimate */
823 /* > Finally, LCWORK must be at least two: LCWORK = MAX( 2, LCWORK ). */
825 /* > If LCWORK = -1, then a workspace query is assumed; the routine */
826 /* > only calculates and returns the optimal and minimal sizes */
827 /* > for the CWORK, IWORK, and RWORK arrays, and no error */
828 /* > message related to LCWORK is issued by XERBLA. */
831 /* > \param[out] RWORK */
833 /* > RWORK is REAL array, dimension (f2cmax(1, LRWORK)). */
835 /* > 1. If JOBA = 'E', RWORK(1) contains an estimate of the condition */
836 /* > number of column scaled A. If A = C * D where D is diagonal and C */
837 /* > has unit columns in the Euclidean norm, then, assuming full column rank, */
838 /* > N^(-1/4) * RWORK(1) <= ||pinv(C)||_2 <= N^(1/4) * RWORK(1). */
839 /* > Otherwise, RWORK(1) = -1. */
840 /* > 2. RWORK(2) contains the number of singular values computed as */
/* > exact zeros in CGESVD applied to the upper triangular or trapezoidal */
842 /* > R (from the initial QR factorization). In case of early exit (no call to */
843 /* > CGESVD, such as in the case of zero matrix) RWORK(2) = -1. */
845 /* > If LIWORK, LCWORK, or LRWORK = -1, then on exit, if INFO = 0, */
846 /* > RWORK(1) returns the minimal LRWORK. */
849 /* > \param[in] LRWORK */
851 /* > LRWORK is INTEGER. */
852 /* > The dimension of the array RWORK. */
853 /* > If JOBP ='P', then LRWORK >= MAX(2, M, 5*N); */
854 /* > Otherwise, LRWORK >= MAX(2, 5*N). */
856 /* > If LRWORK = -1, then a workspace query is assumed; the routine */
857 /* > only calculates and returns the optimal and minimal sizes */
858 /* > for the CWORK, IWORK, and RWORK arrays, and no error */
859 /* > message related to LCWORK is issued by XERBLA. */
862 /* > \param[out] INFO */
864 /* > INFO is INTEGER */
865 /* > = 0: successful exit. */
866 /* > < 0: if INFO = -i, the i-th argument had an illegal value. */
867 /* > > 0: if CBDSQR did not converge, INFO specifies how many superdiagonals */
868 /* > of an intermediate bidiagonal form B (computed in CGESVD) did not */
869 /* > converge to zero. */
872 /* > \par Further Details: */
873 /* ======================== */
877 /* > 1. The data movement (matrix transpose) is coded using simple nested */
878 /* > DO-loops because BLAS and LAPACK do not provide corresponding subroutines. */
879 /* > Those DO-loops are easily identified in this source code - by the CONTINUE */
880 /* > statements labeled with 11**. In an optimized version of this code, the */
881 /* > nested DO loops should be replaced with calls to an optimized subroutine. */
882 /* > 2. This code scales A by 1/SQRT(M) if the largest ABS(A(i,j)) could cause */
/* > column norm overflow. This is the minimal precaution and it is left to the */
884 /* > SVD routine (CGESVD) to do its own preemptive scaling if potential over- */
885 /* > or underflows are detected. To avoid repeated scanning of the array A, */
886 /* > an optimal implementation would do all necessary scaling before calling */
887 /* > CGESVD and the scaling in CGESVD can be switched off. */
888 /* > 3. Other comments related to code optimization are given in comments in the */
/* > code, enclosed in [[double brackets]]. */
892 /* > \par Bugs, examples and comments */
893 /* =========================== */
896 /* > Please report all bugs and send interesting examples and/or comments to */
897 /* > drmac@math.hr. Thank you. */
900 /* > \par References */
901 /* =============== */
904 /* > [1] Zlatko Drmac, Algorithm 977: A QR-Preconditioned QR SVD Method for */
905 /* > Computing the SVD with High Accuracy. ACM Trans. Math. Softw. */
906 /* > 44(1): 11:1-11:30 (2017) */
908 /* > SIGMA library, xGESVDQ section updated February 2016. */
909 /* > Developed and coded by Zlatko Drmac, Department of Mathematics */
910 /* > University of Zagreb, Croatia, drmac@math.hr */
914 /* > \par Contributors: */
915 /* ================== */
918 /* > Developed and coded by Zlatko Drmac, Department of Mathematics */
919 /* > University of Zagreb, Croatia, drmac@math.hr */
925 /* > \author Univ. of Tennessee */
926 /* > \author Univ. of California Berkeley */
927 /* > \author Univ. of Colorado Denver */
928 /* > \author NAG Ltd. */
930 /* > \date November 2018 */
932 /* > \ingroup complexGEsing */
934 /* ===================================================================== */
935 /* Subroutine */ int cgesvdq_(char *joba, char *jobp, char *jobr, char *jobu,
936 char *jobv, integer *m, integer *n, complex *a, integer *lda, real *s,
937 complex *u, integer *ldu, complex *v, integer *ldv, integer *numrank,
938 integer *iwork, integer *liwork, complex *cwork, integer *lcwork,
939 real *rwork, integer *lrwork, integer *info)
941 /* System generated locals */
942 integer a_dim1, a_offset, u_dim1, u_offset, v_dim1, v_offset, i__1, i__2,
947 /* Local variables */
948 integer lwrk_cunmqr__, lwrk_cgesvd2__, ierr;
951 integer lwrk_cunmqr2__, optratio;
952 logical lsvc0, accla;
954 logical acclh, acclm;
957 extern logical lsame_(char *, char *);
962 integer lwlqf, lwqrf;
965 logical dntwu, dntwv, wntuf, wntva;
967 logical wntur, wntus, wntvr;
968 extern /* Subroutine */ int cgeqp3_(integer *, integer *, complex *,
969 integer *, integer *, complex *, complex *, integer *, real *,
971 extern real scnrm2_(integer *, complex *, integer *);
972 integer lwsvd2, lwunq2;
973 extern real clange_(char *, integer *, integer *, complex *, integer *,
976 extern /* Subroutine */ int cgelqf_(integer *, integer *, complex *,
977 integer *, complex *, complex *, integer *, integer *), clascl_(
978 char *, integer *, integer *, real *, real *, integer *, integer *
979 , complex *, integer *, integer *);
981 extern /* Subroutine */ int cgeqrf_(integer *, integer *, complex *,
982 integer *, complex *, complex *, integer *, integer *), csscal_(
983 integer *, real *, complex *, integer *);
984 extern real slamch_(char *);
985 extern /* Subroutine */ int cgesvd_(char *, char *, integer *, integer *,
986 complex *, integer *, real *, complex *, integer *, complex *,
987 integer *, complex *, integer *, real *, integer *), clacpy_(char *, integer *, integer *, complex *, integer
988 *, complex *, integer *), claset_(char *, integer *,
989 integer *, complex *, complex *, complex *, integer *),
990 xerbla_(char *, integer *, ftnlen), clapmt_(logical *, integer *,
991 integer *, complex *, integer *, integer *), slascl_(char *,
992 integer *, integer *, real *, real *, integer *, integer *, real *
993 , integer *, integer *), cpocon_(char *, integer *,
994 complex *, integer *, real *, real *, complex *, real *, integer *
996 extern integer isamax_(integer *, real *, integer *);
997 extern /* Subroutine */ int claswp_(integer *, complex *, integer *,
998 integer *, integer *, integer *, integer *), slaset_(char *,
999 integer *, integer *, real *, real *, real *, integer *);
1001 extern /* Subroutine */ int cunmlq_(char *, char *, integer *, integer *,
1002 integer *, complex *, integer *, complex *, complex *, integer *,
1003 complex *, integer *, integer *), cunmqr_(char *,
1004 char *, integer *, integer *, integer *, complex *, integer *,
1005 complex *, complex *, integer *, complex *, integer *, integer *);
1010 integer lwunlq, optwrk;
1015 integer lwrk_cgeqp3__, optwrk2, lwrk_cgelqf__, iminwrk, lwrk_cgeqrf__,
1016 lwrk_cgesvd__, rminwrk, lwrk_cunmlq__;
1019 /* ===================================================================== */
1022 /* Test the input arguments */
1024 /* Parameter adjustments */
1026 a_offset = 1 + a_dim1 * 1;
1030 u_offset = 1 + u_dim1 * 1;
1033 v_offset = 1 + v_dim1 * 1;
1040 wntus = lsame_(jobu, "S") || lsame_(jobu, "U");
1041 wntur = lsame_(jobu, "R");
1042 wntua = lsame_(jobu, "A");
1043 wntuf = lsame_(jobu, "F");
1044 lsvc0 = wntus || wntur || wntua;
1045 lsvec = lsvc0 || wntuf;
1046 dntwu = lsame_(jobu, "N");
1048 wntvr = lsame_(jobv, "R");
1049 wntva = lsame_(jobv, "A") || lsame_(jobv, "V");
1050 rsvec = wntvr || wntva;
1051 dntwv = lsame_(jobv, "N");
1053 accla = lsame_(joba, "A");
1054 acclm = lsame_(joba, "M");
1055 conda = lsame_(joba, "E");
1056 acclh = lsame_(joba, "H") || conda;
1058 rowprm = lsame_(jobp, "P");
1059 rtrans = lsame_(jobr, "T");
1063 i__1 = 1, i__2 = *n + *m - 1;
1064 iminwrk = f2cmax(i__1,i__2);
1066 i__1 = f2cmax(2,*m), i__2 = *n * 5;
1067 rminwrk = f2cmax(i__1,i__2);
1069 iminwrk = f2cmax(1,*n);
1071 i__1 = 2, i__2 = *n * 5;
1072 rminwrk = f2cmax(i__1,i__2);
1074 lquery = *liwork == -1 || *lcwork == -1 || *lrwork == -1;
1076 if (! (accla || acclm || acclh)) {
1078 } else if (! (rowprm || lsame_(jobp, "N"))) {
1080 } else if (! (rtrans || lsame_(jobr, "N"))) {
1082 } else if (! (lsvec || dntwu)) {
1084 } else if (wntur && wntva) {
1086 } else if (! (rsvec || dntwv)) {
1088 } else if (*m < 0) {
1090 } else if (*n < 0 || *n > *m) {
1092 } else if (*lda < f2cmax(1,*m)) {
1094 } else if (*ldu < 1 || lsvc0 && *ldu < *m || wntuf && *ldu < *n) {
1096 } else if (*ldv < 1 || rsvec && *ldv < *n || conda && *ldv < *n) {
1098 } else if (*liwork < iminwrk && ! lquery) {
1105 /* Compute workspace */
1106 /* [[The expressions for computing the minimal and the optimal */
1107 /* values of LCWORK are written with a lot of redundancy and */
1108 /* can be simplified. However, this detailed form is easier for */
1109 /* maintenance and modifications of the code.]] */
1112 if (wntus || wntur) {
1113 lwunq = f2cmax(*n,1);
1115 lwunq = f2cmax(*m,1);
1120 lwsvd = f2cmax(i__1,1);
1122 cgeqp3_(m, n, &a[a_offset], lda, &iwork[1], cdummy, cdummy, &c_n1,
1124 lwrk_cgeqp3__ = (integer) cdummy[0].r;
1125 if (wntus || wntur) {
1126 cunmqr_("L", "N", m, n, n, &a[a_offset], lda, cdummy, &u[
1127 u_offset], ldu, cdummy, &c_n1, &ierr);
1128 lwrk_cunmqr__ = (integer) cdummy[0].r;
1130 cunmqr_("L", "N", m, m, n, &a[a_offset], lda, cdummy, &u[
1131 u_offset], ldu, cdummy, &c_n1, &ierr);
1132 lwrk_cunmqr__ = (integer) cdummy[0].r;
1139 if (! (lsvec || rsvec)) {
1140 /* only the singular values are requested */
1143 i__1 = *n + lwqp3, i__1 = f2cmax(i__1,lwcon);
1144 minwrk = f2cmax(i__1,lwsvd);
1148 minwrk = f2cmax(i__1,lwsvd);
1151 cgesvd_("N", "N", n, n, &a[a_offset], lda, &s[1], &u[u_offset]
1152 , ldu, &v[v_offset], ldv, cdummy, &c_n1, rdummy, &
1154 lwrk_cgesvd__ = (integer) cdummy[0].r;
1157 i__1 = *n + lwrk_cgeqp3__, i__2 = *n + lwcon, i__1 = f2cmax(
1159 optwrk = f2cmax(i__1,lwrk_cgesvd__);
1162 i__1 = *n + lwrk_cgeqp3__;
1163 optwrk = f2cmax(i__1,lwrk_cgesvd__);
1166 } else if (lsvec && ! rsvec) {
1167 /* singular values and the left singular vectors are requested */
1170 i__1 = f2cmax(lwqp3,lwcon), i__1 = f2cmax(i__1,lwsvd);
1171 minwrk = *n + f2cmax(i__1,lwunq);
1174 i__1 = f2cmax(lwqp3,lwsvd);
1175 minwrk = *n + f2cmax(i__1,lwunq);
1179 cgesvd_("N", "O", n, n, &a[a_offset], lda, &s[1], &u[
1180 u_offset], ldu, &v[v_offset], ldv, cdummy, &c_n1,
1183 cgesvd_("O", "N", n, n, &a[a_offset], lda, &s[1], &u[
1184 u_offset], ldu, &v[v_offset], ldv, cdummy, &c_n1,
1187 lwrk_cgesvd__ = (integer) cdummy[0].r;
1190 i__1 = f2cmax(lwrk_cgeqp3__,lwcon), i__1 = f2cmax(i__1,
1192 optwrk = *n + f2cmax(i__1,lwrk_cunmqr__);
1195 i__1 = f2cmax(lwrk_cgeqp3__,lwrk_cgesvd__);
1196 optwrk = *n + f2cmax(i__1,lwrk_cunmqr__);
1199 } else if (rsvec && ! lsvec) {
1200 /* singular values and the right singular vectors are requested */
1203 i__1 = f2cmax(lwqp3,lwcon);
1204 minwrk = *n + f2cmax(i__1,lwsvd);
1206 minwrk = *n + f2cmax(lwqp3,lwsvd);
1210 cgesvd_("O", "N", n, n, &a[a_offset], lda, &s[1], &u[
1211 u_offset], ldu, &v[v_offset], ldv, cdummy, &c_n1,
1214 cgesvd_("N", "O", n, n, &a[a_offset], lda, &s[1], &u[
1215 u_offset], ldu, &v[v_offset], ldv, cdummy, &c_n1,
1218 lwrk_cgesvd__ = (integer) cdummy[0].r;
1221 i__1 = f2cmax(lwrk_cgeqp3__,lwcon);
1222 optwrk = *n + f2cmax(i__1,lwrk_cgesvd__);
1224 optwrk = *n + f2cmax(lwrk_cgeqp3__,lwrk_cgesvd__);
1228 /* full SVD is requested */
1231 i__1 = f2cmax(lwqp3,lwsvd);
1232 minwrk = f2cmax(i__1,lwunq);
1234 minwrk = f2cmax(minwrk,lwcon);
1240 lwqrf = f2cmax(i__1,1);
1243 lwsvd2 = f2cmax(i__1,1);
1244 lwunq2 = f2cmax(*n,1);
1246 i__1 = lwqp3, i__2 = *n / 2 + lwqrf, i__1 = f2cmax(i__1,i__2)
1247 , i__2 = *n / 2 + lwsvd2, i__1 = f2cmax(i__1,i__2),
1248 i__2 = *n / 2 + lwunq2, i__1 = f2cmax(i__1,i__2);
1249 minwrk2 = f2cmax(i__1,lwunq);
1251 minwrk2 = f2cmax(minwrk2,lwcon);
1253 minwrk2 = *n + minwrk2;
1254 minwrk = f2cmax(minwrk,minwrk2);
1258 i__1 = f2cmax(lwqp3,lwsvd);
1259 minwrk = f2cmax(i__1,lwunq);
1261 minwrk = f2cmax(minwrk,lwcon);
1267 lwlqf = f2cmax(i__1,1);
1270 lwsvd2 = f2cmax(i__1,1);
1271 lwunlq = f2cmax(*n,1);
1273 i__1 = lwqp3, i__2 = *n / 2 + lwlqf, i__1 = f2cmax(i__1,i__2)
1274 , i__2 = *n / 2 + lwsvd2, i__1 = f2cmax(i__1,i__2),
1275 i__2 = *n / 2 + lwunlq, i__1 = f2cmax(i__1,i__2);
1276 minwrk2 = f2cmax(i__1,lwunq);
1278 minwrk2 = f2cmax(minwrk2,lwcon);
1280 minwrk2 = *n + minwrk2;
1281 minwrk = f2cmax(minwrk,minwrk2);
1286 cgesvd_("O", "A", n, n, &a[a_offset], lda, &s[1], &u[
1287 u_offset], ldu, &v[v_offset], ldv, cdummy, &c_n1,
1289 lwrk_cgesvd__ = (integer) cdummy[0].r;
1291 i__1 = f2cmax(lwrk_cgeqp3__,lwrk_cgesvd__);
1292 optwrk = f2cmax(i__1,lwrk_cunmqr__);
1294 optwrk = f2cmax(optwrk,lwcon);
1296 optwrk = *n + optwrk;
1299 cgeqrf_(n, &i__1, &u[u_offset], ldu, cdummy, cdummy, &
1301 lwrk_cgeqrf__ = (integer) cdummy[0].r;
1304 cgesvd_("S", "O", &i__1, &i__2, &v[v_offset], ldv, &s[
1305 1], &u[u_offset], ldu, &v[v_offset], ldv,
1306 cdummy, &c_n1, rdummy, &ierr);
1307 lwrk_cgesvd2__ = (integer) cdummy[0].r;
1309 cunmqr_("R", "C", n, n, &i__1, &u[u_offset], ldu,
1310 cdummy, &v[v_offset], ldv, cdummy, &c_n1, &
1312 lwrk_cunmqr2__ = (integer) cdummy[0].r;
1314 i__1 = lwrk_cgeqp3__, i__2 = *n / 2 + lwrk_cgeqrf__,
1315 i__1 = f2cmax(i__1,i__2), i__2 = *n / 2 +
1316 lwrk_cgesvd2__, i__1 = f2cmax(i__1,i__2), i__2 =
1317 *n / 2 + lwrk_cunmqr2__;
1318 optwrk2 = f2cmax(i__1,i__2);
1320 optwrk2 = f2cmax(optwrk2,lwcon);
1322 optwrk2 = *n + optwrk2;
1323 optwrk = f2cmax(optwrk,optwrk2);
1326 cgesvd_("S", "O", n, n, &a[a_offset], lda, &s[1], &u[
1327 u_offset], ldu, &v[v_offset], ldv, cdummy, &c_n1,
1329 lwrk_cgesvd__ = (integer) cdummy[0].r;
1331 i__1 = f2cmax(lwrk_cgeqp3__,lwrk_cgesvd__);
1332 optwrk = f2cmax(i__1,lwrk_cunmqr__);
1334 optwrk = f2cmax(optwrk,lwcon);
1336 optwrk = *n + optwrk;
1339 cgelqf_(&i__1, n, &u[u_offset], ldu, cdummy, cdummy, &
1341 lwrk_cgelqf__ = (integer) cdummy[0].r;
1344 cgesvd_("S", "O", &i__1, &i__2, &v[v_offset], ldv, &s[
1345 1], &u[u_offset], ldu, &v[v_offset], ldv,
1346 cdummy, &c_n1, rdummy, &ierr);
1347 lwrk_cgesvd2__ = (integer) cdummy[0].r;
1349 cunmlq_("R", "N", n, n, &i__1, &u[u_offset], ldu,
1350 cdummy, &v[v_offset], ldv, cdummy, &c_n1, &
1352 lwrk_cunmlq__ = (integer) cdummy[0].r;
1354 i__1 = lwrk_cgeqp3__, i__2 = *n / 2 + lwrk_cgelqf__,
1355 i__1 = f2cmax(i__1,i__2), i__2 = *n / 2 +
1356 lwrk_cgesvd2__, i__1 = f2cmax(i__1,i__2), i__2 =
1357 *n / 2 + lwrk_cunmlq__;
1358 optwrk2 = f2cmax(i__1,i__2);
1360 optwrk2 = f2cmax(optwrk2,lwcon);
1362 optwrk2 = *n + optwrk2;
1363 optwrk = f2cmax(optwrk,optwrk2);
1369 minwrk = f2cmax(2,minwrk);
1370 optwrk = f2cmax(2,optwrk);
1371 if (*lcwork < minwrk && ! lquery) {
1377 if (*info == 0 && *lrwork < rminwrk && ! lquery) {
1382 xerbla_("CGESVDQ", &i__1, (ftnlen)7);
1384 } else if (lquery) {
1386 /* Return optimal workspace */
1389 cwork[1].r = (real) optwrk, cwork[1].i = 0.f;
1390 cwork[2].r = (real) minwrk, cwork[2].i = 0.f;
1391 rwork[1] = (real) rminwrk;
1395 /* Quick return if the matrix is void. */
1397 if (*m == 0 || *n == 0) {
1404 /* ell-infinity norm - this enhances numerical robustness in */
1405 /* the case of differently scaled rows. */
1407 for (p = 1; p <= i__1; ++p) {
1408 /* RWORK(p) = ABS( A(p,ICAMAX(N,A(p,1),LDA)) ) */
1409 /* [[CLANGE will return NaN if an entry of the p-th row is NaN]] */
1410 rwork[p] = clange_("M", &c__1, n, &a[p + a_dim1], lda, rdummy);
1411 if (rwork[p] != rwork[p] || rwork[p] * 0.f != 0.f) {
1414 xerbla_("CGESVDQ", &i__2, (ftnlen)7);
1420 for (p = 1; p <= i__1; ++p) {
1422 q = isamax_(&i__2, &rwork[p], &c__1) + p - 1;
1426 rwork[p] = rwork[q];
1432 if (rwork[1] == 0.f) {
1433 /* Quick return: A is the M x N zero matrix. */
1435 slaset_("G", n, &c__1, &c_b74, &c_b74, &s[1], n);
1437 claset_("G", m, n, &c_b1, &c_b2, &u[u_offset], ldu)
1441 claset_("G", m, m, &c_b1, &c_b2, &u[u_offset], ldu)
1445 claset_("G", n, n, &c_b1, &c_b2, &v[v_offset], ldv)
1449 claset_("G", n, &c__1, &c_b1, &c_b1, &cwork[1], n);
1450 claset_("G", m, n, &c_b1, &c_b2, &u[u_offset], ldu)
1454 for (p = 1; p <= i__1; ++p) {
1460 for (p = *n + 1; p <= i__1; ++p) {
1472 if (rwork[1] > big / sqrt((real) (*m))) {
1473 /* matrix by 1/sqrt(M) if too large entry detected */
1474 r__1 = sqrt((real) (*m));
1475 clascl_("G", &c__0, &c__0, &r__1, &c_b87, m, n, &a[a_offset], lda,
1480 claswp_(n, &a[a_offset], lda, &c__1, &i__1, &iwork[*n + 1], &c__1);
1483 /* norms overflows during the QR factorization. The SVD procedure should */
1484 /* have its own scaling to save the singular values from overflows and */
1485 /* underflows. That depends on the SVD procedure. */
1488 rtmp = clange_("M", m, n, &a[a_offset], lda, &rwork[1]);
1489 if (rtmp != rtmp || rtmp * 0.f != 0.f) {
1492 xerbla_("CGESVDQ", &i__1, (ftnlen)7);
1495 if (rtmp > big / sqrt((real) (*m))) {
1496 /* matrix by 1/sqrt(M) if too large entry detected */
1497 r__1 = sqrt((real) (*m));
1498 clascl_("G", &c__0, &c__0, &r__1, &c_b87, m, n, &a[a_offset], lda,
1505 /* A * P = Q * [ R ] */
1509 for (p = 1; p <= i__1; ++p) {
1513 i__1 = *lcwork - *n;
1514 cgeqp3_(m, n, &a[a_offset], lda, &iwork[1], &cwork[1], &cwork[*n + 1], &
1515 i__1, &rwork[1], &ierr);
1517 /* If the user requested accuracy level allows truncation in the */
1518 /* computed upper triangular factor, the matrix R is examined and, */
1519 /* if possible, replaced with its leading upper trapezoidal part. */
1521 epsln = slamch_("E");
1522 sfmin = slamch_("S");
1523 /* SMALL = SFMIN / EPSLN */
1528 /* Standard absolute error bound suffices. All sigma_i with */
1529 /* sigma_i < N*EPS*||A||_F are flushed to zero. This is an */
1530 /* aggressive enforcement of lower numerical rank by introducing a */
1531 /* backward error of the order of N*EPS*||A||_F. */
1533 rtmp = sqrt((real) (*n)) * epsln;
1535 for (p = 2; p <= i__1; ++p) {
1536 if (c_abs(&a[p + p * a_dim1]) < rtmp * c_abs(&a[a_dim1 + 1])) {
1546 /* Sudden drop on the diagonal of R is used as the criterion for being */
1547 /* close-to-rank-deficient. The threshold is set to EPSLN=SLAMCH('E'). */
1548 /* [[This can be made more flexible by replacing this hard-coded value */
1549 /* with a user specified threshold.]] Also, the values that underflow */
1550 /* will be truncated. */
1553 for (p = 2; p <= i__1; ++p) {
1554 if (c_abs(&a[p + p * a_dim1]) < epsln * c_abs(&a[p - 1 + (p - 1) *
1555 a_dim1]) || c_abs(&a[p + p * a_dim1]) < sfmin) {
1565 /* obvious case of zero pivots. */
1566 /* R(i,i)=0 => R(i:N,i:N)=0. */
1569 for (p = 2; p <= i__1; ++p) {
1570 if (c_abs(&a[p + p * a_dim1]) == 0.f) {
1579 /* Estimate the scaled condition number of A. Use the fact that it is */
1580 /* the same as the scaled condition number of R. */
1581 clacpy_("U", n, n, &a[a_offset], lda, &v[v_offset], ldv);
1582 /* Only the leading NR x NR submatrix of the triangular factor */
1583 /* is considered. Only if NR=N will this give a reliable error */
1584 /* bound. However, even for NR < N, this can be used on an */
1585 /* expert level and obtain useful information in the sense of */
1586 /* perturbation theory. */
1588 for (p = 1; p <= i__1; ++p) {
1589 rtmp = scnrm2_(&p, &v[p * v_dim1 + 1], &c__1);
1591 csscal_(&p, &r__1, &v[p * v_dim1 + 1], &c__1);
1594 if (! (lsvec || rsvec)) {
1595 cpocon_("U", &nr, &v[v_offset], ldv, &c_b87, &rtmp, &cwork[1],
1598 cpocon_("U", &nr, &v[v_offset], ldv, &c_b87, &rtmp, &cwork[*n
1599 + 1], &rwork[1], &ierr);
1601 sconda = 1.f / sqrt(rtmp);
1602 /* For NR=N, SCONDA is an estimate of SQRT(||(R^* * R)^(-1)||_1), */
1603 /* N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA */
1604 /* See the reference [1] for more details. */
1611 } else if (wntus || wntuf) {
1617 if (! (rsvec || lsvec)) {
1618 /* ....................................................................... */
1619 /* ....................................................................... */
1622 /* the upper triangle of [A] to zero. */
1623 i__1 = f2cmin(*n,nr);
1624 for (p = 1; p <= i__1; ++p) {
1625 i__2 = p + p * a_dim1;
1626 r_cnjg(&q__1, &a[p + p * a_dim1]);
1627 a[i__2].r = q__1.r, a[i__2].i = q__1.i;
1629 for (q = p + 1; q <= i__2; ++q) {
1630 i__3 = q + p * a_dim1;
1631 r_cnjg(&q__1, &a[p + q * a_dim1]);
1632 a[i__3].r = q__1.r, a[i__3].i = q__1.i;
1634 i__3 = p + q * a_dim1;
1635 a[i__3].r = 0.f, a[i__3].i = 0.f;
1642 cgesvd_("N", "N", n, &nr, &a[a_offset], lda, &s[1], &u[u_offset],
1643 ldu, &v[v_offset], ldv, &cwork[1], lcwork, &rwork[1],
1652 claset_("L", &i__1, &i__2, &c_b1, &c_b1, &a[a_dim1 + 2], lda);
1654 cgesvd_("N", "N", &nr, n, &a[a_offset], lda, &s[1], &u[u_offset],
1655 ldu, &v[v_offset], ldv, &cwork[1], lcwork, &rwork[1],
1660 } else if (lsvec && ! rsvec) {
1661 /* ....................................................................... */
1662 /* ....................................................................... */
1666 for (p = 1; p <= i__1; ++p) {
1668 for (q = p; q <= i__2; ++q) {
1669 i__3 = q + p * u_dim1;
1670 r_cnjg(&q__1, &a[p + q * a_dim1]);
1671 u[i__3].r = q__1.r, u[i__3].i = q__1.i;
1679 claset_("U", &i__1, &i__2, &c_b1, &c_b1, &u[(u_dim1 << 1) + 1]
1682 /* vectors overwrite [U](1:NR,1:NR) as conjugate transposed. These */
1683 /* will be pre-multiplied by Q to build the left singular vectors of A. */
1684 i__1 = *lcwork - *n;
1685 cgesvd_("N", "O", n, &nr, &u[u_offset], ldu, &s[1], &u[u_offset],
1686 ldu, &u[u_offset], ldu, &cwork[*n + 1], &i__1, &rwork[1],
1690 for (p = 1; p <= i__1; ++p) {
1691 i__2 = p + p * u_dim1;
1692 r_cnjg(&q__1, &u[p + p * u_dim1]);
1693 u[i__2].r = q__1.r, u[i__2].i = q__1.i;
1695 for (q = p + 1; q <= i__2; ++q) {
1696 r_cnjg(&q__1, &u[q + p * u_dim1]);
1697 ctmp.r = q__1.r, ctmp.i = q__1.i;
1698 i__3 = q + p * u_dim1;
1699 r_cnjg(&q__1, &u[p + q * u_dim1]);
1700 u[i__3].r = q__1.r, u[i__3].i = q__1.i;
1701 i__3 = p + q * u_dim1;
1702 u[i__3].r = ctmp.r, u[i__3].i = ctmp.i;
1709 clacpy_("U", &nr, n, &a[a_offset], lda, &u[u_offset], ldu);
1713 claset_("L", &i__1, &i__2, &c_b1, &c_b1, &u[u_dim1 + 2], ldu);
1715 /* vectors overwrite [U](1:NR,1:NR) */
1716 i__1 = *lcwork - *n;
1717 cgesvd_("O", "N", &nr, n, &u[u_offset], ldu, &s[1], &u[u_offset],
1718 ldu, &v[v_offset], ldv, &cwork[*n + 1], &i__1, &rwork[1],
1720 /* R. These will be pre-multiplied by Q to build the left singular */
1724 /* (M x NR) or (M x N) or (M x M). */
1725 if (nr < *m && ! wntuf) {
1727 claset_("A", &i__1, &nr, &c_b1, &c_b1, &u[nr + 1 + u_dim1], ldu);
1730 claset_("A", &nr, &i__1, &c_b1, &c_b1, &u[(nr + 1) * u_dim1 +
1734 claset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[nr + 1 + (nr + 1)
1739 /* The Q matrix from the first QRF is built into the left singular */
1740 /* vectors matrix U. */
1743 i__1 = *lcwork - *n;
1744 cunmqr_("L", "N", m, &n1, n, &a[a_offset], lda, &cwork[1], &u[
1745 u_offset], ldu, &cwork[*n + 1], &i__1, &ierr);
1747 if (rowprm && ! wntuf) {
1749 claswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[*n + 1], &
1753 } else if (rsvec && ! lsvec) {
1754 /* ....................................................................... */
1755 /* ....................................................................... */
1758 for (p = 1; p <= i__1; ++p) {
1760 for (q = p; q <= i__2; ++q) {
1761 i__3 = q + p * v_dim1;
1762 r_cnjg(&q__1, &a[p + q * a_dim1]);
1763 v[i__3].r = q__1.r, v[i__3].i = q__1.i;
1771 claset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1) + 1]
1774 /* vectors not computed */
1775 if (wntvr || nr == *n) {
1776 i__1 = *lcwork - *n;
1777 cgesvd_("O", "N", n, &nr, &v[v_offset], ldv, &s[1], &u[
1778 u_offset], ldu, &u[u_offset], ldu, &cwork[*n + 1], &
1779 i__1, &rwork[1], info);
1782 for (p = 1; p <= i__1; ++p) {
1783 i__2 = p + p * v_dim1;
1784 r_cnjg(&q__1, &v[p + p * v_dim1]);
1785 v[i__2].r = q__1.r, v[i__2].i = q__1.i;
1787 for (q = p + 1; q <= i__2; ++q) {
1788 r_cnjg(&q__1, &v[q + p * v_dim1]);
1789 ctmp.r = q__1.r, ctmp.i = q__1.i;
1790 i__3 = q + p * v_dim1;
1791 r_cnjg(&q__1, &v[p + q * v_dim1]);
1792 v[i__3].r = q__1.r, v[i__3].i = q__1.i;
1793 i__3 = p + q * v_dim1;
1794 v[i__3].r = ctmp.r, v[i__3].i = ctmp.i;
1802 for (p = 1; p <= i__1; ++p) {
1804 for (q = nr + 1; q <= i__2; ++q) {
1805 i__3 = p + q * v_dim1;
1806 r_cnjg(&q__1, &v[q + p * v_dim1]);
1807 v[i__3].r = q__1.r, v[i__3].i = q__1.i;
1813 clapmt_(&c_false, &nr, n, &v[v_offset], ldv, &iwork[1]);
1815 /* [!] This is simple implementation that augments [V](1:N,1:NR) */
1816 /* by padding a zero block. In the case NR << N, a more efficient */
1817 /* way is to first use the QR factorization. For more details */
1818 /* how to implement this, see the " FULL SVD " branch. */
1820 claset_("G", n, &i__1, &c_b1, &c_b1, &v[(nr + 1) * v_dim1 + 1]
1822 i__1 = *lcwork - *n;
1823 cgesvd_("O", "N", n, n, &v[v_offset], ldv, &s[1], &u[u_offset]
1824 , ldu, &u[u_offset], ldu, &cwork[*n + 1], &i__1, &
1828 for (p = 1; p <= i__1; ++p) {
1829 i__2 = p + p * v_dim1;
1830 r_cnjg(&q__1, &v[p + p * v_dim1]);
1831 v[i__2].r = q__1.r, v[i__2].i = q__1.i;
1833 for (q = p + 1; q <= i__2; ++q) {
1834 r_cnjg(&q__1, &v[q + p * v_dim1]);
1835 ctmp.r = q__1.r, ctmp.i = q__1.i;
1836 i__3 = q + p * v_dim1;
1837 r_cnjg(&q__1, &v[p + q * v_dim1]);
1838 v[i__3].r = q__1.r, v[i__3].i = q__1.i;
1839 i__3 = p + q * v_dim1;
1840 v[i__3].r = ctmp.r, v[i__3].i = ctmp.i;
1845 clapmt_(&c_false, n, n, &v[v_offset], ldv, &iwork[1]);
1849 clacpy_("U", &nr, n, &a[a_offset], lda, &v[v_offset], ldv);
1853 claset_("L", &i__1, &i__2, &c_b1, &c_b1, &v[v_dim1 + 2], ldv);
1855 /* vectors stored in U(1:NR,1:NR) */
1856 if (wntvr || nr == *n) {
1857 i__1 = *lcwork - *n;
1858 cgesvd_("N", "O", &nr, n, &v[v_offset], ldv, &s[1], &u[
1859 u_offset], ldu, &v[v_offset], ldv, &cwork[*n + 1], &
1860 i__1, &rwork[1], info);
1861 clapmt_(&c_false, &nr, n, &v[v_offset], ldv, &iwork[1]);
1863 /* [!] This is simple implementation that augments [V](1:NR,1:N) */
1864 /* by padding a zero block. In the case NR << N, a more efficient */
1865 /* way is to first use the LQ factorization. For more details */
1866 /* how to implement this, see the " FULL SVD " branch. */
1868 claset_("G", &i__1, n, &c_b1, &c_b1, &v[nr + 1 + v_dim1], ldv);
1869 i__1 = *lcwork - *n;
1870 cgesvd_("N", "O", n, n, &v[v_offset], ldv, &s[1], &u[u_offset]
1871 , ldu, &v[v_offset], ldv, &cwork[*n + 1], &i__1, &
1873 clapmt_(&c_false, n, n, &v[v_offset], ldv, &iwork[1]);
1879 /* ....................................................................... */
1880 /* ....................................................................... */
1884 if (wntvr || nr == *n) {
1885 /* vectors of R**H */
1887 for (p = 1; p <= i__1; ++p) {
1889 for (q = p; q <= i__2; ++q) {
1890 i__3 = q + p * v_dim1;
1891 r_cnjg(&q__1, &a[p + q * a_dim1]);
1892 v[i__3].r = q__1.r, v[i__3].i = q__1.i;
1900 claset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1)
1904 /* singular vectors of R**H stored in [U](1:NR,1:NR) as conjugate */
1906 i__1 = *lcwork - *n;
1907 cgesvd_("O", "A", n, &nr, &v[v_offset], ldv, &s[1], &v[
1908 v_offset], ldv, &u[u_offset], ldu, &cwork[*n + 1], &
1909 i__1, &rwork[1], info);
1911 for (p = 1; p <= i__1; ++p) {
1912 i__2 = p + p * v_dim1;
1913 r_cnjg(&q__1, &v[p + p * v_dim1]);
1914 v[i__2].r = q__1.r, v[i__2].i = q__1.i;
1916 for (q = p + 1; q <= i__2; ++q) {
1917 r_cnjg(&q__1, &v[q + p * v_dim1]);
1918 ctmp.r = q__1.r, ctmp.i = q__1.i;
1919 i__3 = q + p * v_dim1;
1920 r_cnjg(&q__1, &v[p + q * v_dim1]);
1921 v[i__3].r = q__1.r, v[i__3].i = q__1.i;
1922 i__3 = p + q * v_dim1;
1923 v[i__3].r = ctmp.r, v[i__3].i = ctmp.i;
1930 for (p = 1; p <= i__1; ++p) {
1932 for (q = nr + 1; q <= i__2; ++q) {
1933 i__3 = p + q * v_dim1;
1934 r_cnjg(&q__1, &v[q + p * v_dim1]);
1935 v[i__3].r = q__1.r, v[i__3].i = q__1.i;
1941 clapmt_(&c_false, &nr, n, &v[v_offset], ldv, &iwork[1]);
1944 for (p = 1; p <= i__1; ++p) {
1945 i__2 = p + p * u_dim1;
1946 r_cnjg(&q__1, &u[p + p * u_dim1]);
1947 u[i__2].r = q__1.r, u[i__2].i = q__1.i;
1949 for (q = p + 1; q <= i__2; ++q) {
1950 r_cnjg(&q__1, &u[q + p * u_dim1]);
1951 ctmp.r = q__1.r, ctmp.i = q__1.i;
1952 i__3 = q + p * u_dim1;
1953 r_cnjg(&q__1, &u[p + q * u_dim1]);
1954 u[i__3].r = q__1.r, u[i__3].i = q__1.i;
1955 i__3 = p + q * u_dim1;
1956 u[i__3].r = ctmp.r, u[i__3].i = ctmp.i;
1962 if (nr < *m && ! wntuf) {
1964 claset_("A", &i__1, &nr, &c_b1, &c_b1, &u[nr + 1 + u_dim1]
1968 claset_("A", &nr, &i__1, &c_b1, &c_b1, &u[(nr + 1) *
1972 claset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[nr + 1 + (
1973 nr + 1) * u_dim1], ldu);
1978 /* vectors of R**H */
1979 /* [[The optimal ratio N/NR for using QRF instead of padding */
1980 /* with zeros. Here hard coded to 2; it must be at least */
1981 /* two due to work space constraints.]] */
1982 /* OPTRATIO = ILAENV(6, 'CGESVD', 'S' // 'O', NR,N,0,0) */
1983 /* OPTRATIO = MAX( OPTRATIO, 2 ) */
1985 if (optratio * nr > *n) {
1987 for (p = 1; p <= i__1; ++p) {
1989 for (q = p; q <= i__2; ++q) {
1990 i__3 = q + p * v_dim1;
1991 r_cnjg(&q__1, &a[p + q * a_dim1]);
1992 v[i__3].r = q__1.r, v[i__3].i = q__1.i;
2000 claset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 <<
2005 claset_("A", n, &i__1, &c_b1, &c_b1, &v[(nr + 1) * v_dim1
2007 i__1 = *lcwork - *n;
2008 cgesvd_("O", "A", n, n, &v[v_offset], ldv, &s[1], &v[
2009 v_offset], ldv, &u[u_offset], ldu, &cwork[*n + 1],
2010 &i__1, &rwork[1], info);
2013 for (p = 1; p <= i__1; ++p) {
2014 i__2 = p + p * v_dim1;
2015 r_cnjg(&q__1, &v[p + p * v_dim1]);
2016 v[i__2].r = q__1.r, v[i__2].i = q__1.i;
2018 for (q = p + 1; q <= i__2; ++q) {
2019 r_cnjg(&q__1, &v[q + p * v_dim1]);
2020 ctmp.r = q__1.r, ctmp.i = q__1.i;
2021 i__3 = q + p * v_dim1;
2022 r_cnjg(&q__1, &v[p + q * v_dim1]);
2023 v[i__3].r = q__1.r, v[i__3].i = q__1.i;
2024 i__3 = p + q * v_dim1;
2025 v[i__3].r = ctmp.r, v[i__3].i = ctmp.i;
2030 clapmt_(&c_false, n, n, &v[v_offset], ldv, &iwork[1]);
2031 /* (M x N1), i.e. (M x N) or (M x M). */
2034 for (p = 1; p <= i__1; ++p) {
2035 i__2 = p + p * u_dim1;
2036 r_cnjg(&q__1, &u[p + p * u_dim1]);
2037 u[i__2].r = q__1.r, u[i__2].i = q__1.i;
2039 for (q = p + 1; q <= i__2; ++q) {
2040 r_cnjg(&q__1, &u[q + p * u_dim1]);
2041 ctmp.r = q__1.r, ctmp.i = q__1.i;
2042 i__3 = q + p * u_dim1;
2043 r_cnjg(&q__1, &u[p + q * u_dim1]);
2044 u[i__3].r = q__1.r, u[i__3].i = q__1.i;
2045 i__3 = p + q * u_dim1;
2046 u[i__3].r = ctmp.r, u[i__3].i = ctmp.i;
2052 if (*n < *m && ! wntuf) {
2054 claset_("A", &i__1, n, &c_b1, &c_b1, &u[*n + 1 +
2058 claset_("A", n, &i__1, &c_b1, &c_b1, &u[(*n + 1) *
2062 claset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[*n +
2063 1 + (*n + 1) * u_dim1], ldu);
2067 /* singular vectors of R */
2069 for (p = 1; p <= i__1; ++p) {
2071 for (q = p; q <= i__2; ++q) {
2072 i__3 = q + (nr + p) * u_dim1;
2073 r_cnjg(&q__1, &a[p + q * a_dim1]);
2074 u[i__3].r = q__1.r, u[i__3].i = q__1.i;
2082 claset_("U", &i__1, &i__2, &c_b1, &c_b1, &u[(nr + 2) *
2085 i__1 = *lcwork - *n - nr;
2086 cgeqrf_(n, &nr, &u[(nr + 1) * u_dim1 + 1], ldu, &cwork[*n
2087 + 1], &cwork[*n + nr + 1], &i__1, &ierr);
2089 for (p = 1; p <= i__1; ++p) {
2091 for (q = 1; q <= i__2; ++q) {
2092 i__3 = q + p * v_dim1;
2093 r_cnjg(&q__1, &u[p + (nr + q) * u_dim1]);
2094 v[i__3].r = q__1.r, v[i__3].i = q__1.i;
2101 claset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 << 1)
2103 i__1 = *lcwork - *n - nr;
2104 cgesvd_("S", "O", &nr, &nr, &v[v_offset], ldv, &s[1], &u[
2105 u_offset], ldu, &v[v_offset], ldv, &cwork[*n + nr
2106 + 1], &i__1, &rwork[1], info);
2108 claset_("A", &i__1, &nr, &c_b1, &c_b1, &v[nr + 1 + v_dim1]
2111 claset_("A", &nr, &i__1, &c_b1, &c_b1, &v[(nr + 1) *
2115 claset_("A", &i__1, &i__2, &c_b1, &c_b2, &v[nr + 1 + (nr
2116 + 1) * v_dim1], ldv);
2117 i__1 = *lcwork - *n - nr;
2118 cunmqr_("R", "C", n, n, &nr, &u[(nr + 1) * u_dim1 + 1],
2119 ldu, &cwork[*n + 1], &v[v_offset], ldv, &cwork[*n
2120 + nr + 1], &i__1, &ierr);
2121 clapmt_(&c_false, n, n, &v[v_offset], ldv, &iwork[1]);
2122 /* (M x NR) or (M x N) or (M x M). */
2123 if (nr < *m && ! wntuf) {
2125 claset_("A", &i__1, &nr, &c_b1, &c_b1, &u[nr + 1 +
2129 claset_("A", &nr, &i__1, &c_b1, &c_b1, &u[(nr + 1)
2130 * u_dim1 + 1], ldu);
2133 claset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[nr +
2134 1 + (nr + 1) * u_dim1], ldu);
2143 if (wntvr || nr == *n) {
2144 clacpy_("U", &nr, n, &a[a_offset], lda, &v[v_offset], ldv);
2148 claset_("L", &i__1, &i__2, &c_b1, &c_b1, &v[v_dim1 + 2],
2151 /* singular vectors of R stored in [U](1:NR,1:NR) */
2152 i__1 = *lcwork - *n;
2153 cgesvd_("S", "O", &nr, n, &v[v_offset], ldv, &s[1], &u[
2154 u_offset], ldu, &v[v_offset], ldv, &cwork[*n + 1], &
2155 i__1, &rwork[1], info);
2156 clapmt_(&c_false, &nr, n, &v[v_offset], ldv, &iwork[1]);
2157 /* (M x NR) or (M x N) or (M x M). */
2158 if (nr < *m && ! wntuf) {
2160 claset_("A", &i__1, &nr, &c_b1, &c_b1, &u[nr + 1 + u_dim1]
2164 claset_("A", &nr, &i__1, &c_b1, &c_b1, &u[(nr + 1) *
2168 claset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[nr + 1 + (
2169 nr + 1) * u_dim1], ldu);
2174 /* is then N1 (N or M) */
2175 /* [[The optimal ratio N/NR for using LQ instead of padding */
2176 /* with zeros. Here hard coded to 2; it must be at least */
2177 /* two due to work space constraints.]] */
2178 /* OPTRATIO = ILAENV(6, 'CGESVD', 'S' // 'O', NR,N,0,0) */
2179 /* OPTRATIO = MAX( OPTRATIO, 2 ) */
2181 if (optratio * nr > *n) {
2182 clacpy_("U", &nr, n, &a[a_offset], lda, &v[v_offset], ldv);
2186 claset_("L", &i__1, &i__2, &c_b1, &c_b1, &v[v_dim1 +
2189 /* singular vectors of R stored in [U](1:NR,1:NR) */
2191 claset_("A", &i__1, n, &c_b1, &c_b1, &v[nr + 1 + v_dim1],
2193 i__1 = *lcwork - *n;
2194 cgesvd_("S", "O", n, n, &v[v_offset], ldv, &s[1], &u[
2195 u_offset], ldu, &v[v_offset], ldv, &cwork[*n + 1],
2196 &i__1, &rwork[1], info);
2197 clapmt_(&c_false, n, n, &v[v_offset], ldv, &iwork[1]);
2198 /* singular vectors of A. The leading N left singular vectors */
2199 /* are in [U](1:N,1:N) */
2200 /* (M x N1), i.e. (M x N) or (M x M). */
2201 if (*n < *m && ! wntuf) {
2203 claset_("A", &i__1, n, &c_b1, &c_b1, &u[*n + 1 +
2207 claset_("A", n, &i__1, &c_b1, &c_b1, &u[(*n + 1) *
2211 claset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[*n +
2212 1 + (*n + 1) * u_dim1], ldu);
2216 clacpy_("U", &nr, n, &a[a_offset], lda, &u[nr + 1 +
2221 claset_("L", &i__1, &i__2, &c_b1, &c_b1, &u[nr + 2 +
2224 i__1 = *lcwork - *n - nr;
2225 cgelqf_(&nr, n, &u[nr + 1 + u_dim1], ldu, &cwork[*n + 1],
2226 &cwork[*n + nr + 1], &i__1, &ierr);
2227 clacpy_("L", &nr, &nr, &u[nr + 1 + u_dim1], ldu, &v[
2232 claset_("U", &i__1, &i__2, &c_b1, &c_b1, &v[(v_dim1 <<
2235 i__1 = *lcwork - *n - nr;
2236 cgesvd_("S", "O", &nr, &nr, &v[v_offset], ldv, &s[1], &u[
2237 u_offset], ldu, &v[v_offset], ldv, &cwork[*n + nr
2238 + 1], &i__1, &rwork[1], info);
2240 claset_("A", &i__1, &nr, &c_b1, &c_b1, &v[nr + 1 + v_dim1]
2243 claset_("A", &nr, &i__1, &c_b1, &c_b1, &v[(nr + 1) *
2247 claset_("A", &i__1, &i__2, &c_b1, &c_b2, &v[nr + 1 + (nr
2248 + 1) * v_dim1], ldv);
2249 i__1 = *lcwork - *n - nr;
2250 cunmlq_("R", "N", n, n, &nr, &u[nr + 1 + u_dim1], ldu, &
2251 cwork[*n + 1], &v[v_offset], ldv, &cwork[*n + nr
2252 + 1], &i__1, &ierr);
2253 clapmt_(&c_false, n, n, &v[v_offset], ldv, &iwork[1]);
2254 /* (M x NR) or (M x N) or (M x M). */
2255 if (nr < *m && ! wntuf) {
2257 claset_("A", &i__1, &nr, &c_b1, &c_b1, &u[nr + 1 +
2261 claset_("A", &nr, &i__1, &c_b1, &c_b1, &u[(nr + 1)
2262 * u_dim1 + 1], ldu);
2265 claset_("A", &i__1, &i__2, &c_b1, &c_b2, &u[nr +
2266 1 + (nr + 1) * u_dim1], ldu);
2273 /* The Q matrix from the first QRF is built into the left singular */
2274 /* vectors matrix U. */
2277 i__1 = *lcwork - *n;
2278 cunmqr_("L", "N", m, &n1, n, &a[a_offset], lda, &cwork[1], &u[
2279 u_offset], ldu, &cwork[*n + 1], &i__1, &ierr);
2281 if (rowprm && ! wntuf) {
2283 claswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[*n + 1], &
2287 /* ... end of the "full SVD" branch */
2290 /* Check whether some singular values are returned as zeros, e.g. */
2291 /* due to underflow, and update the numerical rank. */
2293 for (q = p; q >= 1; --q) {
2302 /* singular values are set to zero. */
2305 slaset_("G", &i__1, &c__1, &c_b74, &c_b74, &s[nr + 1], n);
2309 r__1 = sqrt((real) (*m));
2310 slascl_("G", &c__0, &c__0, &c_b87, &r__1, &nr, &c__1, &s[1], n, &ierr);
2315 rwork[2] = (real) (p - nr);
2316 /* exact zeros in CGESVD() applied to the (possibly truncated) */
2317 /* full row rank triangular (trapezoidal) factor of A. */
2322 /* End of CGESVDQ */