/* Integer-width typedefs for the BLAS interface.
 * NOTE(review): the two conflicting BLASLONG/BLASULONG definitions below
 * were originally selected by preprocessor conditionals (64-bit vs 32-bit
 * interface) that are not visible in this excerpt. */
typedef long long BLASLONG;
typedef unsigned long long BLASULONG;
typedef long BLASLONG;
typedef unsigned long BLASULONG;
/* blasint is the Fortran INTEGER type used by the translated code. */
typedef BLASLONG blasint;
/* blasabs must match the width of blasint; the variants below correspond
 * to the alternative blasint widths selected above. */
#define blasabs(x) llabs(x)
#define blasabs(x) labs(x)
#define blasabs(x) abs(x)
/* f2c-style type aliases used throughout the translated Fortran source. */
typedef blasint integer;
typedef unsigned int uinteger;
typedef char *address;
typedef short int shortint;
/* NOTE(review): the matching `typedef float real;` is elided in this
 * excerpt but is required by the complex struct below. */
typedef double doublereal;
/* Fortran COMPLEX and DOUBLE COMPLEX, stored as (real, imaginary) pairs. */
typedef struct { real r, i; } complex;
typedef struct { doublereal r, i; } doublecomplex;
/* Helpers to view f2c complex structs as native complex values.
 * NOTE(review): the _Fcomplex/_Dcomplex (MSVC) and C99 _Complex variants
 * below were originally separated by preprocessor conditionals that are
 * not visible in this excerpt. */
static inline _Fcomplex Cf(complex *z) {_Fcomplex zz={z->r , z->i}; return zz;}
static inline _Dcomplex Cd(doublecomplex *z) {_Dcomplex zz={z->r , z->i};return zz;}
/* Pointer reinterpretation; relies on complex/doublecomplex having the
 * same layout as the native complex types. */
static inline _Fcomplex * _pCf(complex *z) {return (_Fcomplex*)z;}
static inline _Dcomplex * _pCd(doublecomplex *z) {return (_Dcomplex*)z;}
static inline _Complex float Cf(complex *z) {return z->r + z->i*_Complex_I;}
static inline _Complex double Cd(doublecomplex *z) {return z->r + z->i*_Complex_I;}
static inline _Complex float * _pCf(complex *z) {return (_Complex float*)z;}
static inline _Complex double * _pCd(doublecomplex *z) {return (_Complex double*)z;}
/* Writable lvalue views of f2c complex objects. */
#define pCf(z) (*_pCf(z))
#define pCd(z) (*_pCd(z))
/* Fortran LOGICAL*2, LOGICAL*1, INTEGER*1. */
typedef short int shortlogical;
typedef char logical1;
typedef char integer1;
63 /* Extern is for use with -E */
74 /*external read, write*/
83 /*internal read, write*/
113 /*rewind, backspace, endfile*/
125 ftnint *inex; /*parameters in standard's order*/
151 union Multitype { /* for multiple entry points */
162 typedef union Multitype Multitype;
164 struct Vardesc { /* for Namelist */
170 typedef struct Vardesc Vardesc;
177 typedef struct Namelist Namelist;
/* f2c intrinsic-function macros. Arguments may be evaluated more than
 * once, so callers must not pass expressions with side effects. */
#define abs(x) ((x) >= 0 ? (x) : -(x))
#define dabs(x) (fabs(x))
#define f2cmin(a,b) ((a) <= (b) ? (a) : (b))
#define f2cmax(a,b) ((a) >= (b) ? (a) : (b))
#define dmin(a,b) (f2cmin(a,b))
#define dmax(a,b) (f2cmax(a,b))
/* Fortran BTEST/IBCLR/IBSET bit intrinsics. */
#define bit_test(a,b) ((a) >> (b) & 1)
#define bit_clear(a,b) ((a) & ~((uinteger)1 << (b)))
#define bit_set(a,b) ((a) | ((uinteger)1 << (b)))
/* Fortran ABORT(): terminates via sig_die (defined below as exit(1)). */
#define abort_() { sig_die("Fortran abort routine called", 1); }
/* Single-precision complex absolute value (Fortran CABS). */
#define c_abs(z) (cabsf(Cf(z)))
/* NOTE(review): uses double-precision ccos on a single-precision value
 * (other c_* macros use the f-suffixed variants) — presumably relies on
 * implicit conversion; confirm against the other f2c runtime macros. */
#define c_cos(R,Z) { pCf(R)=ccos(Cf(Z)); }
/* Complex division c = a / b for the MSVC _Fcomplex/_Dcomplex branch.
 * Fixes three defects in the original expansion:
 *   1. it referenced an undefined df() macro (compile error when expanded);
 *   2. it divided componentwise (re/re, im/im), which is not complex
 *      division — the correct result is (a * conj(b)) / |b|^2;
 *   3. it assigned through the rvalue returned by Cf()/Cd(); the store
 *      must go through the pointer views _pCf()/_pCd().
 * Results are computed into temporaries first so that c may alias a or b.
 * Arguments are still evaluated more than once — no side effects allowed. */
#define c_div(c, a, b) { float _br = Cf(b)._Val[0], _bi = Cf(b)._Val[1]; float _d = _br*_br + _bi*_bi; float _re = (Cf(a)._Val[0]*_br + Cf(a)._Val[1]*_bi)/_d; float _im = (Cf(a)._Val[1]*_br - Cf(a)._Val[0]*_bi)/_d; _pCf(c)->_Val[0] = _re; _pCf(c)->_Val[1] = _im; }
#define z_div(c, a, b) { double _br = Cd(b)._Val[0], _bi = Cd(b)._Val[1]; double _d = _br*_br + _bi*_bi; double _re = (Cd(a)._Val[0]*_br + Cd(a)._Val[1]*_bi)/_d; double _im = (Cd(a)._Val[1]*_br - Cd(a)._Val[0]*_bi)/_d; _pCd(c)->_Val[0] = _re; _pCd(c)->_Val[1] = _im; }
/* C99 native-complex variants of the complex intrinsics (the _Val-based
 * MSVC variants appear above; the selecting #if lines are not visible in
 * this excerpt). All *(x) forms implement Fortran pass-by-reference. */
#define c_div(c, a, b) {pCf(c) = Cf(a)/Cf(b);}
#define z_div(c, a, b) {pCd(c) = Cd(a)/Cd(b);}
#define c_exp(R, Z) {pCf(R) = cexpf(Cf(Z));}
#define c_log(R, Z) {pCf(R) = clogf(Cf(Z));}
#define c_sin(R, Z) {pCf(R) = csinf(Cf(Z));}
//#define c_sqrt(R, Z) {*(R) = csqrtf(Cf(Z));}
#define c_sqrt(R, Z) {pCf(R) = csqrtf(Cf(Z));}
/* Double-precision scalar intrinsics (Fortran DABS, DACOS, ...). */
#define d_abs(x) (fabs(*(x)))
#define d_acos(x) (acos(*(x)))
#define d_asin(x) (asin(*(x)))
#define d_atan(x) (atan(*(x)))
#define d_atn2(x, y) (atan2(*(x),*(y)))
#define d_cnjg(R, Z) { pCd(R) = conj(Cd(Z)); }
#define r_cnjg(R, Z) { pCf(R) = conjf(Cf(Z)); }
#define d_cos(x) (cos(*(x)))
#define d_cosh(x) (cosh(*(x)))
#define d_dim(__a, __b) ( *(__a) > *(__b) ? *(__a) - *(__b) : 0.0 )
#define d_exp(x) (exp(*(x)))
#define d_imag(z) (cimag(Cd(z)))
#define r_imag(z) (cimagf(Cf(z)))
#define d_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
/* NOTE(review): r_int/r_lg10 use the double-precision floor/log rather
 * than floorf/logf; result is converted at the use site. */
#define r_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
/* 0.4342944... = log10(e); computes log10 via natural log. */
#define d_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
#define r_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
#define d_log(x) (log(*(x)))
#define d_mod(x, y) (fmod(*(x), *(y)))
/* Round-half-away-from-zero (Fortran NINT family helper). */
#define u_nint(__x) ((__x)>=0 ? floor((__x) + .5) : -floor(.5 - (__x)))
#define d_nint(x) u_nint(*(x))
/* Fortran SIGN(a,b): |a| with the sign of b. */
#define u_sign(__a,__b) ((__b) >= 0 ? ((__a) >= 0 ? (__a) : -(__a)) : -((__a) >= 0 ? (__a) : -(__a)))
#define d_sign(a,b) u_sign(*(a),*(b))
#define r_sign(a,b) u_sign(*(a),*(b))
#define d_sin(x) (sin(*(x)))
#define d_sinh(x) (sinh(*(x)))
#define d_sqrt(x) (sqrt(*(x)))
#define d_tan(x) (tan(*(x)))
#define d_tanh(x) (tanh(*(x)))
#define i_abs(x) abs(*(x))
#define i_dnnt(x) ((integer)u_nint(*(x)))
#define i_len(s, n) (n)
#define i_nint(x) ((integer)u_nint(*(x)))
#define i_sign(a,b) ((integer)u_sign((integer)*(a),(integer)*(b)))
/* Power intrinsics; the *pow_ui helpers are defined later in the file. */
#define pow_dd(ap, bp) ( pow(*(ap), *(bp)))
#define pow_si(B,E) spow_ui(*(B),*(E))
#define pow_ri(B,E) spow_ui(*(B),*(E))
#define pow_di(B,E) dpow_ui(*(B),*(E))
#define pow_zi(p, a, b) {pCd(p) = zpow_ui(Cd(a), *(b));}
#define pow_ci(p, a, b) {pCf(p) = cpow_ui(Cf(a), *(b));}
#define pow_zz(R,A,B) {pCd(R) = cpow(Cd(A),*(B));}
/* Fortran string concatenation: copies each piece, blank-pads the rest. */
#define s_cat(lpp, rpp, rnp, np, llp) { ftnlen i, nc, ll; char *f__rp, *lp; ll = (llp); lp = (lpp); for(i=0; i < (int)*(np); ++i) { nc = ll; if((rnp)[i] < nc) nc = (rnp)[i]; ll -= nc; f__rp = (rpp)[i]; while(--nc >= 0) *lp++ = *(f__rp)++; } while(--ll >= 0) *lp++ = ' '; }
#define s_cmp(a,b,c,d) ((integer)strncmp((a),(b),f2cmin((c),(d))))
/* NOTE(review): unlike classic f2c s_copy, this stops at NUL and does not
 * blank-pad the destination — presumably a deliberate simplification. */
#define s_copy(A,B,C,D) { int __i,__m; for (__i=0, __m=f2cmin((C),(D)); __i<__m && (B)[__i] != 0; ++__i) (A)[__i] = (B)[__i]; }
/* Message and kill arguments are discarded; always exits with status 1. */
#define sig_die(s, kill) { exit(1); }
#define s_stop(s, n) {exit(0);}
static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n";
/* Double-precision complex intrinsics. */
#define z_abs(z) (cabs(Cd(z)))
#define z_exp(R, Z) {pCd(R) = cexp(Cd(Z));}
#define z_sqrt(R, Z) {pCd(R) = csqrt(Cd(Z));}
/* Loop-control shims used by the translated Fortran EXIT/CYCLE. These
 * must remain bare statements, so they are left as-is. */
#define myexit_() break;
#define mycycle() continue;
/* myceiling/myhuge/mymaxloc were brace-wrapped ({ceil(w)} etc.), which
 * made them compound statements and therefore unusable wherever the
 * translated code needs an expression (assignment, argument, condition).
 * Parenthesizing fixes expression use and remains backward compatible:
 * a former statement use "myceiling(w);" still compiles as "(ceil(w));". */
#define myceiling(w) (ceil(w))
#define myhuge(w) (HUGE_VAL)
//#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);}
#define mymaxloc(w,s,e,n) (dmaxloc_(w,*(s),*(e),n))
/* procedure parameter types for -A and -C++ */

#define F2C_proc_par_types 1
/* Pointer-to-logical-function type for procedure arguments.
 * NOTE(review): the (...) form is only valid C++; the two typedefs were
 * originally separated by a language-dispatch conditional not visible in
 * this excerpt. */
typedef logical (*L_fp)(...);
typedef logical (*L_fp)();
/* Integer-exponent power helpers backing the pow_* macros above.
 * NOTE(review): this excerpt elides the binary-exponentiation loops and
 * closing braces of these functions; only the visible lines are kept,
 * byte-for-byte, with annotations. */
static float spow_ui(float x, integer n) {
float pow=1.0; unsigned long int u;
/* Negative exponent: invert the base and raise to |n|. */
if(n < 0) n = -n, x = 1/x;
static double dpow_ui(double x, integer n) {
double pow=1.0; unsigned long int u;
if(n < 0) n = -n, x = 1/x;
static _Fcomplex cpow_ui(complex x, integer n) {
complex pow={1.0,0.0}; unsigned long int u;
/* NOTE(review): the componentwise reciprocal/product below is not complex
 * arithmetic; it matches the original translation but is only correct for
 * axis-aligned values — confirm against the C99 variant that follows. */
if(n < 0) n = -n, x.r = 1/x.r, x.i=1/x.i;
if(u & 01) pow.r *= x.r, pow.i *= x.i;
if(u >>= 1) x.r *= x.r, x.i *= x.i;
_Fcomplex p={pow.r, pow.i};
static _Complex float cpow_ui(_Complex float x, integer n) {
_Complex float pow=1.0; unsigned long int u;
if(n < 0) n = -n, x = 1/x;
static _Dcomplex zpow_ui(_Dcomplex x, integer n) {
_Dcomplex pow={1.0,0.0}; unsigned long int u;
if(n < 0) n = -n, x._Val[0] = 1/x._Val[0], x._Val[1] =1/x._Val[1];
if(u & 01) pow._Val[0] *= x._Val[0], pow._Val[1] *= x._Val[1];
if(u >>= 1) x._Val[0] *= x._Val[0], x._Val[1] *= x._Val[1];
_Dcomplex p = {pow._Val[0], pow._Val[1]};
static _Complex double zpow_ui(_Complex double x, integer n) {
_Complex double pow=1.0; unsigned long int u;
if(n < 0) n = -n, x = 1/x;
/* Integer power; special-cases 0, 1 and -1 bases before the loop.
 * NOTE(review): "1/x" when x == 0 deliberately raises divide-by-zero to
 * mimic Fortran 0**negative behavior — verify against libf2c pow_ii. */
static integer pow_ii(integer x, integer n) {
integer pow; unsigned long int u;
if (n == 0 || x == 1) pow = 1;
else if (x != -1) pow = x == 0 ? 1/x : 0;
if ((n > 0) || !(n == 0 || x == 1 || x != -1)) {
/* Index (1-based) of the first maximum of w[s-1..e-1] (Fortran MAXLOC).
 * The n parameter is unused in the visible lines. */
static integer dmaxloc_(double *w, integer s, integer e, integer *n)
double m; integer i, mi;
for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
if (w[i-1]>m) mi=i ,m=w[i-1];
static integer smaxloc_(float *w, integer s, integer e, integer *n)
float m; integer i, mi;
for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
if (w[i-1]>m) mi=i ,m=w[i-1];
/* Complex dot products in the style of BLAS CDOTC/ZDOTC/CDOTU/ZDOTU,
 * returning the result through *z.
 * NOTE(review): this excerpt elides the else-branch braces, the final
 * store into *z, the closing braces, and the preprocessor conditionals
 * that select the MSVC _Val variants vs the C99 _Complex variants; the
 * visible lines are kept byte-for-byte.
 * NOTE(review): in the _Val variants, conjf/conj applied to the re/im
 * components separately and the componentwise products are not complex
 * multiplication — confirm against the C99 variants below each one. */
static inline void cdotc_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
integer n = *n_, incx = *incx_, incy = *incy_, i;
_Fcomplex zdotc = {0.0, 0.0};
/* Fast path: unit strides in both vectors. */
if (incx == 1 && incy == 1) {
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc._Val[0] += conjf(Cf(&x[i]))._Val[0] * Cf(&y[i])._Val[0];
zdotc._Val[1] += conjf(Cf(&x[i]))._Val[1] * Cf(&y[i])._Val[1];
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc._Val[0] += conjf(Cf(&x[i*incx]))._Val[0] * Cf(&y[i*incy])._Val[0];
zdotc._Val[1] += conjf(Cf(&x[i*incx]))._Val[1] * Cf(&y[i*incy])._Val[1];
_Complex float zdotc = 0.0;
if (incx == 1 && incy == 1) {
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc += conjf(Cf(&x[i])) * Cf(&y[i]);
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc += conjf(Cf(&x[i*incx])) * Cf(&y[i*incy]);
/* Double-precision conjugated dot product (ZDOTC). */
static inline void zdotc_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
integer n = *n_, incx = *incx_, incy = *incy_, i;
_Dcomplex zdotc = {0.0, 0.0};
if (incx == 1 && incy == 1) {
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc._Val[0] += conj(Cd(&x[i]))._Val[0] * Cd(&y[i])._Val[0];
zdotc._Val[1] += conj(Cd(&x[i]))._Val[1] * Cd(&y[i])._Val[1];
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc._Val[0] += conj(Cd(&x[i*incx]))._Val[0] * Cd(&y[i*incy])._Val[0];
zdotc._Val[1] += conj(Cd(&x[i*incx]))._Val[1] * Cd(&y[i*incy])._Val[1];
_Complex double zdotc = 0.0;
if (incx == 1 && incy == 1) {
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc += conj(Cd(&x[i])) * Cd(&y[i]);
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc += conj(Cd(&x[i*incx])) * Cd(&y[i*incy]);
/* Unconjugated single-precision dot product (CDOTU). */
static inline void cdotu_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
integer n = *n_, incx = *incx_, incy = *incy_, i;
_Fcomplex zdotc = {0.0, 0.0};
if (incx == 1 && incy == 1) {
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc._Val[0] += Cf(&x[i])._Val[0] * Cf(&y[i])._Val[0];
zdotc._Val[1] += Cf(&x[i])._Val[1] * Cf(&y[i])._Val[1];
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc._Val[0] += Cf(&x[i*incx])._Val[0] * Cf(&y[i*incy])._Val[0];
zdotc._Val[1] += Cf(&x[i*incx])._Val[1] * Cf(&y[i*incy])._Val[1];
_Complex float zdotc = 0.0;
if (incx == 1 && incy == 1) {
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc += Cf(&x[i]) * Cf(&y[i]);
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc += Cf(&x[i*incx]) * Cf(&y[i*incy]);
/* Unconjugated double-precision dot product (ZDOTU). */
static inline void zdotu_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
integer n = *n_, incx = *incx_, incy = *incy_, i;
_Dcomplex zdotc = {0.0, 0.0};
if (incx == 1 && incy == 1) {
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc._Val[0] += Cd(&x[i])._Val[0] * Cd(&y[i])._Val[0];
zdotc._Val[1] += Cd(&x[i])._Val[1] * Cd(&y[i])._Val[1];
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc._Val[0] += Cd(&x[i*incx])._Val[0] * Cd(&y[i*incy])._Val[0];
zdotc._Val[1] += Cd(&x[i*incx])._Val[1] * Cd(&y[i*incy])._Val[1];
_Complex double zdotc = 0.0;
if (incx == 1 && incy == 1) {
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc += Cd(&x[i]) * Cd(&y[i]);
for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
zdotc += Cd(&x[i*incx]) * Cd(&y[i*incy]);
505 /* -- translated by f2c (version 20000121).
506 You must link the resulting object file with the libraries:
507 -lf2c -lm (in that order)
513 /* Table of constant values */
/* f2c-generated constants; Fortran passes everything by reference, so
 * these are taken by address at call sites in dgejsv_ below. */
static integer c__1 = 1;
static doublereal c_b34 = 0.;
static doublereal c_b35 = 1.;
static integer c__0 = 0;
static integer c_n1 = -1;
521 /* > \brief \b DGEJSV */
523 /* =========== DOCUMENTATION =========== */
525 /* Online html documentation available at */
526 /* http://www.netlib.org/lapack/explore-html/ */
529 /* > Download DGEJSV + dependencies */
530 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/dgejsv.
533 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/dgejsv.
536 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/dgejsv.
544 /* SUBROUTINE DGEJSV( JOBA, JOBU, JOBV, JOBR, JOBT, JOBP, */
545 /* M, N, A, LDA, SVA, U, LDU, V, LDV, */
546 /* WORK, LWORK, IWORK, INFO ) */
549 /* INTEGER INFO, LDA, LDU, LDV, LWORK, M, N */
550 /* DOUBLE PRECISION A( LDA, * ), SVA( N ), U( LDU, * ), V( LDV, * ), */
551 /* $ WORK( LWORK ) */
552 /* INTEGER IWORK( * ) */
553 /* CHARACTER*1 JOBA, JOBP, JOBR, JOBT, JOBU, JOBV */
556 /* > \par Purpose: */
561 /* > DGEJSV computes the singular value decomposition (SVD) of a real M-by-N */
562 /* > matrix [A], where M >= N. The SVD of [A] is written as */
564 /* > [A] = [U] * [SIGMA] * [V]^t, */
566 /* > where [SIGMA] is an N-by-N (M-by-N) matrix which is zero except for its N */
567 /* > diagonal elements, [U] is an M-by-N (or M-by-M) orthonormal matrix, and */
568 /* > [V] is an N-by-N orthogonal matrix. The diagonal elements of [SIGMA] are */
569 /* > the singular values of [A]. The columns of [U] and [V] are the left and */
570 /* > the right singular vectors of [A], respectively. The matrices [U] and [V] */
571 /* > are computed and stored in the arrays U and V, respectively. The diagonal */
572 /* > of [SIGMA] is computed and stored in the array SVA. */
573 /* > DGEJSV can sometimes compute tiny singular values and their singular vectors much */
574 /* > more accurately than other SVD routines, see below under Further Details. */
580 /* > \param[in] JOBA */
582 /* > JOBA is CHARACTER*1 */
583 /* > Specifies the level of accuracy: */
584 /* > = 'C': This option works well (high relative accuracy) if A = B * D, */
585 /* > with well-conditioned B and arbitrary diagonal matrix D. */
586 /* > The accuracy cannot be spoiled by COLUMN scaling. The */
587 /* > accuracy of the computed output depends on the condition of */
588 /* > B, and the procedure aims at the best theoretical accuracy. */
589 /* > The relative error max_{i=1:N}|d sigma_i| / sigma_i is */
590 /* > bounded by f(M,N)*epsilon* cond(B), independent of D. */
591 /* > The input matrix is preprocessed with the QRF with column */
592 /* > pivoting. This initial preprocessing and preconditioning by */
593 /* > a rank revealing QR factorization is common for all values of */
594 /* > JOBA. Additional actions are specified as follows: */
595 /* > = 'E': Computation as with 'C' with an additional estimate of the */
596 /* > condition number of B. It provides a realistic error bound. */
597 /* > = 'F': If A = D1 * C * D2 with ill-conditioned diagonal scalings */
598 /* > D1, D2, and well-conditioned matrix C, this option gives */
599 /* > higher accuracy than the 'C' option. If the structure of the */
600 /* > input matrix is not known, and relative accuracy is */
601 /* > desirable, then this option is advisable. The input matrix A */
602 /* > is preprocessed with QR factorization with FULL (row and */
603 /* > column) pivoting. */
604 /* > = 'G': Computation as with 'F' with an additional estimate of the */
605 /* > condition number of B, where A=D*B. If A has heavily weighted */
606 /* > rows, then using this condition number gives too pessimistic */
608 /* > = 'A': Small singular values are the noise and the matrix is treated */
609 /* > as numerically rank deficient. The error in the computed */
610 /* > singular values is bounded by f(m,n)*epsilon*||A||. */
611 /* > The computed SVD A = U * S * V^t restores A up to */
612 /* > f(m,n)*epsilon*||A||. */
613 /* > This gives the procedure the licence to discard (set to zero) */
614 /* > all singular values below N*epsilon*||A||. */
615 /* > = 'R': Similar as in 'A'. Rank revealing property of the initial */
/* > QR factorization is used to reveal (using triangular factor) */
617 /* > a gap sigma_{r+1} < epsilon * sigma_r in which case the */
618 /* > numerical RANK is declared to be r. The SVD is computed with */
619 /* > absolute error bounds, but more accurately than with 'A'. */
622 /* > \param[in] JOBU */
624 /* > JOBU is CHARACTER*1 */
625 /* > Specifies whether to compute the columns of U: */
626 /* > = 'U': N columns of U are returned in the array U. */
627 /* > = 'F': full set of M left sing. vectors is returned in the array U. */
628 /* > = 'W': U may be used as workspace of length M*N. See the description */
630 /* > = 'N': U is not computed. */
633 /* > \param[in] JOBV */
635 /* > JOBV is CHARACTER*1 */
636 /* > Specifies whether to compute the matrix V: */
637 /* > = 'V': N columns of V are returned in the array V; Jacobi rotations */
638 /* > are not explicitly accumulated. */
639 /* > = 'J': N columns of V are returned in the array V, but they are */
640 /* > computed as the product of Jacobi rotations. This option is */
641 /* > allowed only if JOBU .NE. 'N', i.e. in computing the full SVD. */
642 /* > = 'W': V may be used as workspace of length N*N. See the description */
644 /* > = 'N': V is not computed. */
647 /* > \param[in] JOBR */
649 /* > JOBR is CHARACTER*1 */
650 /* > Specifies the RANGE for the singular values. Issues the licence to */
651 /* > set to zero small positive singular values if they are outside */
652 /* > specified range. If A .NE. 0 is scaled so that the largest singular */
653 /* > value of c*A is around DSQRT(BIG), BIG=SLAMCH('O'), then JOBR issues */
654 /* > the licence to kill columns of A whose norm in c*A is less than */
655 /* > DSQRT(SFMIN) (for JOBR = 'R'), or less than SMALL=SFMIN/EPSLN, */
656 /* > where SFMIN=SLAMCH('S'), EPSLN=SLAMCH('E'). */
657 /* > = 'N': Do not kill small columns of c*A. This option assumes that */
658 /* > BLAS and QR factorizations and triangular solvers are */
659 /* > implemented to work in that range. If the condition of A */
660 /* > is greater than BIG, use DGESVJ. */
661 /* > = 'R': RESTRICTED range for sigma(c*A) is [DSQRT(SFMIN), DSQRT(BIG)] */
662 /* > (roughly, as described above). This option is recommended. */
663 /* > ~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
664 /* > For computing the singular values in the FULL range [SFMIN,BIG] */
668 /* > \param[in] JOBT */
670 /* > JOBT is CHARACTER*1 */
671 /* > If the matrix is square then the procedure may determine to use */
672 /* > transposed A if A^t seems to be better with respect to convergence. */
673 /* > If the matrix is not square, JOBT is ignored. This is subject to */
674 /* > changes in the future. */
675 /* > The decision is based on two values of entropy over the adjoint */
676 /* > orbit of A^t * A. See the descriptions of WORK(6) and WORK(7). */
677 /* > = 'T': transpose if entropy test indicates possibly faster */
678 /* > convergence of Jacobi process if A^t is taken as input. If A is */
679 /* > replaced with A^t, then the row pivoting is included automatically. */
680 /* > = 'N': do not speculate. */
681 /* > This option can be used to compute only the singular values, or the */
682 /* > full SVD (U, SIGMA and V). For only one set of singular vectors */
683 /* > (U or V), the caller should provide both U and V, as one of the */
684 /* > matrices is used as workspace if the matrix A is transposed. */
685 /* > The implementer can easily remove this constraint and make the */
686 /* > code more complicated. See the descriptions of U and V. */
689 /* > \param[in] JOBP */
691 /* > JOBP is CHARACTER*1 */
692 /* > Issues the licence to introduce structured perturbations to drown */
693 /* > denormalized numbers. This licence should be active if the */
694 /* > denormals are poorly implemented, causing slow computation, */
695 /* > especially in cases of fast convergence (!). For details see [1,2]. */
/* > For the sake of simplicity, these perturbations are included only */
697 /* > when the full SVD or only the singular values are requested. The */
698 /* > implementer/user can easily add the perturbation for the cases of */
699 /* > computing one set of singular vectors. */
700 /* > = 'P': introduce perturbation */
701 /* > = 'N': do not perturb */
707 /* > The number of rows of the input matrix A. M >= 0. */
713 /* > The number of columns of the input matrix A. M >= N >= 0. */
716 /* > \param[in,out] A */
718 /* > A is DOUBLE PRECISION array, dimension (LDA,N) */
719 /* > On entry, the M-by-N matrix A. */
722 /* > \param[in] LDA */
724 /* > LDA is INTEGER */
725 /* > The leading dimension of the array A. LDA >= f2cmax(1,M). */
728 /* > \param[out] SVA */
730 /* > SVA is DOUBLE PRECISION array, dimension (N) */
732 /* > - For WORK(1)/WORK(2) = ONE: The singular values of A. During the */
733 /* > computation SVA contains Euclidean column norms of the */
734 /* > iterated matrices in the array A. */
735 /* > - For WORK(1) .NE. WORK(2): The singular values of A are */
736 /* > (WORK(1)/WORK(2)) * SVA(1:N). This factored form is used if */
737 /* > sigma_max(A) overflows or if small singular values have been */
738 /* > saved from underflow by scaling the input matrix A. */
739 /* > - If JOBR='R' then some of the singular values may be returned */
740 /* > as exact zeros obtained by "set to zero" because they are */
741 /* > below the numerical rank threshold or are denormalized numbers. */
744 /* > \param[out] U */
746 /* > U is DOUBLE PRECISION array, dimension ( LDU, N ) */
747 /* > If JOBU = 'U', then U contains on exit the M-by-N matrix of */
748 /* > the left singular vectors. */
749 /* > If JOBU = 'F', then U contains on exit the M-by-M matrix of */
750 /* > the left singular vectors, including an ONB */
751 /* > of the orthogonal complement of the Range(A). */
752 /* > If JOBU = 'W' .AND. (JOBV = 'V' .AND. JOBT = 'T' .AND. M = N), */
753 /* > then U is used as workspace if the procedure */
754 /* > replaces A with A^t. In that case, [V] is computed */
755 /* > in U as left singular vectors of A^t and then */
756 /* > copied back to the V array. This 'W' option is just */
757 /* > a reminder to the caller that in this case U is */
758 /* > reserved as workspace of length N*N. */
759 /* > If JOBU = 'N' U is not referenced, unless JOBT='T'. */
762 /* > \param[in] LDU */
764 /* > LDU is INTEGER */
765 /* > The leading dimension of the array U, LDU >= 1. */
766 /* > IF JOBU = 'U' or 'F' or 'W', then LDU >= M. */
769 /* > \param[out] V */
771 /* > V is DOUBLE PRECISION array, dimension ( LDV, N ) */
772 /* > If JOBV = 'V', 'J' then V contains on exit the N-by-N matrix of */
773 /* > the right singular vectors; */
774 /* > If JOBV = 'W', AND (JOBU = 'U' AND JOBT = 'T' AND M = N), */
/* > then V is used as workspace if the procedure */
776 /* > replaces A with A^t. In that case, [U] is computed */
777 /* > in V as right singular vectors of A^t and then */
778 /* > copied back to the U array. This 'W' option is just */
779 /* > a reminder to the caller that in this case V is */
780 /* > reserved as workspace of length N*N. */
781 /* > If JOBV = 'N' V is not referenced, unless JOBT='T'. */
784 /* > \param[in] LDV */
786 /* > LDV is INTEGER */
787 /* > The leading dimension of the array V, LDV >= 1. */
788 /* > If JOBV = 'V' or 'J' or 'W', then LDV >= N. */
791 /* > \param[out] WORK */
793 /* > WORK is DOUBLE PRECISION array, dimension (LWORK) */
794 /* > On exit, if N > 0 .AND. M > 0 (else not referenced), */
795 /* > WORK(1) = SCALE = WORK(2) / WORK(1) is the scaling factor such */
796 /* > that SCALE*SVA(1:N) are the computed singular values */
797 /* > of A. (See the description of SVA().) */
798 /* > WORK(2) = See the description of WORK(1). */
799 /* > WORK(3) = SCONDA is an estimate for the condition number of */
800 /* > column equilibrated A. (If JOBA = 'E' or 'G') */
801 /* > SCONDA is an estimate of DSQRT(||(R^t * R)^(-1)||_1). */
802 /* > It is computed using DPOCON. It holds */
803 /* > N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA */
804 /* > where R is the triangular factor from the QRF of A. */
805 /* > However, if R is truncated and the numerical rank is */
806 /* > determined to be strictly smaller than N, SCONDA is */
807 /* > returned as -1, thus indicating that the smallest */
808 /* > singular values might be lost. */
810 /* > If full SVD is needed, the following two condition numbers are */
/* > useful for the analysis of the algorithm. They are provided for */
812 /* > a developer/implementer who is familiar with the details of */
815 /* > WORK(4) = an estimate of the scaled condition number of the */
816 /* > triangular factor in the first QR factorization. */
817 /* > WORK(5) = an estimate of the scaled condition number of the */
818 /* > triangular factor in the second QR factorization. */
819 /* > The following two parameters are computed if JOBT = 'T'. */
820 /* > They are provided for a developer/implementer who is familiar */
821 /* > with the details of the method. */
823 /* > WORK(6) = the entropy of A^t*A :: this is the Shannon entropy */
824 /* > of diag(A^t*A) / Trace(A^t*A) taken as point in the */
825 /* > probability simplex. */
826 /* > WORK(7) = the entropy of A*A^t. */
829 /* > \param[in] LWORK */
831 /* > LWORK is INTEGER */
832 /* > Length of WORK to confirm proper allocation of work space. */
833 /* > LWORK depends on the job: */
835 /* > If only SIGMA is needed (JOBU = 'N', JOBV = 'N') and */
836 /* > -> .. no scaled condition estimate required (JOBE = 'N'): */
837 /* > LWORK >= f2cmax(2*M+N,4*N+1,7). This is the minimal requirement. */
838 /* > ->> For optimal performance (blocked code) the optimal value */
839 /* > is LWORK >= f2cmax(2*M+N,3*N+(N+1)*NB,7). Here NB is the optimal */
840 /* > block size for DGEQP3 and DGEQRF. */
841 /* > In general, optimal LWORK is computed as */
842 /* > LWORK >= f2cmax(2*M+N,N+LWORK(DGEQP3),N+LWORK(DGEQRF), 7). */
843 /* > -> .. an estimate of the scaled condition number of A is */
844 /* > required (JOBA='E', 'G'). In this case, LWORK is the maximum */
845 /* > of the above and N*N+4*N, i.e. LWORK >= f2cmax(2*M+N,N*N+4*N,7). */
846 /* > ->> For optimal performance (blocked code) the optimal value */
847 /* > is LWORK >= f2cmax(2*M+N,3*N+(N+1)*NB, N*N+4*N, 7). */
848 /* > In general, the optimal length LWORK is computed as */
849 /* > LWORK >= f2cmax(2*M+N,N+LWORK(DGEQP3),N+LWORK(DGEQRF), */
850 /* > N+N*N+LWORK(DPOCON),7). */
852 /* > If SIGMA and the right singular vectors are needed (JOBV = 'V'), */
853 /* > -> the minimal requirement is LWORK >= f2cmax(2*M+N,4*N+1,7). */
854 /* > -> For optimal performance, LWORK >= f2cmax(2*M+N,3*N+(N+1)*NB,7), */
855 /* > where NB is the optimal block size for DGEQP3, DGEQRF, DGELQF, */
856 /* > DORMLQ. In general, the optimal length LWORK is computed as */
857 /* > LWORK >= f2cmax(2*M+N,N+LWORK(DGEQP3), N+LWORK(DPOCON), */
858 /* > N+LWORK(DGELQF), 2*N+LWORK(DGEQRF), N+LWORK(DORMLQ)). */
860 /* > If SIGMA and the left singular vectors are needed */
861 /* > -> the minimal requirement is LWORK >= f2cmax(2*M+N,4*N+1,7). */
862 /* > -> For optimal performance: */
863 /* > if JOBU = 'U' :: LWORK >= f2cmax(2*M+N,3*N+(N+1)*NB,7), */
864 /* > if JOBU = 'F' :: LWORK >= f2cmax(2*M+N,3*N+(N+1)*NB,N+M*NB,7), */
865 /* > where NB is the optimal block size for DGEQP3, DGEQRF, DORMQR. */
866 /* > In general, the optimal length LWORK is computed as */
867 /* > LWORK >= f2cmax(2*M+N,N+LWORK(DGEQP3),N+LWORK(DPOCON), */
868 /* > 2*N+LWORK(DGEQRF), N+LWORK(DORMQR)). */
869 /* > Here LWORK(DORMQR) equals N*NB (for JOBU = 'U') or */
870 /* > M*NB (for JOBU = 'F'). */
872 /* > If the full SVD is needed: (JOBU = 'U' or JOBU = 'F') and */
873 /* > -> if JOBV = 'V' */
874 /* > the minimal requirement is LWORK >= f2cmax(2*M+N,6*N+2*N*N). */
875 /* > -> if JOBV = 'J' the minimal requirement is */
876 /* > LWORK >= f2cmax(2*M+N, 4*N+N*N,2*N+N*N+6). */
877 /* > -> For optimal performance, LWORK should be additionally */
878 /* > larger than N+M*NB, where NB is the optimal block size */
882 /* > \param[out] IWORK */
884 /* > IWORK is INTEGER array, dimension (M+3*N). */
886 /* > IWORK(1) = the numerical rank determined after the initial */
887 /* > QR factorization with pivoting. See the descriptions */
888 /* > of JOBA and JOBR. */
889 /* > IWORK(2) = the number of the computed nonzero singular values */
890 /* > IWORK(3) = if nonzero, a warning message: */
891 /* > If IWORK(3) = 1 then some of the column norms of A */
892 /* > were denormalized floats. The requested high accuracy */
893 /* > is not warranted by the data. */
896 /* > \param[out] INFO */
898 /* > INFO is INTEGER */
899 /* > < 0: if INFO = -i, then the i-th argument had an illegal value. */
900 /* > = 0: successful exit; */
901 /* > > 0: DGEJSV did not converge in the maximal allowed number */
902 /* > of sweeps. The computed values may be inaccurate. */
908 /* > \author Univ. of Tennessee */
909 /* > \author Univ. of California Berkeley */
910 /* > \author Univ. of Colorado Denver */
911 /* > \author NAG Ltd. */
913 /* > \date June 2016 */
915 /* > \ingroup doubleGEsing */
917 /* > \par Further Details: */
918 /* ===================== */
922 /* > DGEJSV implements a preconditioned Jacobi SVD algorithm. It uses DGEQP3, */
923 /* > DGEQRF, and DGELQF as preprocessors and preconditioners. Optionally, an */
924 /* > additional row pivoting can be used as a preprocessor, which in some */
925 /* > cases results in much higher accuracy. An example is matrix A with the */
926 /* > structure A = D1 * C * D2, where D1, D2 are arbitrarily ill-conditioned */
927 /* > diagonal matrices and C is well-conditioned matrix. In that case, complete */
928 /* > pivoting in the first QR factorizations provides accuracy dependent on the */
929 /* > condition number of C, and independent of D1, D2. Such higher accuracy is */
930 /* > not completely understood theoretically, but it works well in practice. */
931 /* > Further, if A can be written as A = B*D, with well-conditioned B and some */
932 /* > diagonal D, then the high accuracy is guaranteed, both theoretically and */
933 /* > in software, independent of D. For more details see [1], [2]. */
934 /* > The computational range for the singular values can be the full range */
935 /* > ( UNDERFLOW,OVERFLOW ), provided that the machine arithmetic and the BLAS */
936 /* > & LAPACK routines called by DGEJSV are implemented to work in that range. */
937 /* > If that is not the case, then the restriction for safe computation with */
938 /* > the singular values in the range of normalized IEEE numbers is that the */
939 /* > spectral condition number kappa(A)=sigma_max(A)/sigma_min(A) does not */
940 /* > overflow. This code (DGEJSV) is best used in this restricted range, */
941 /* > meaning that singular values of magnitude below ||A||_2 / DLAMCH('O') are */
942 /* > returned as zeros. See JOBR for details on this. */
943 /* > Further, this implementation is somewhat slower than the one described */
944 /* > in [1,2] due to replacement of some non-LAPACK components, and because */
945 /* > the choice of some tuning parameters in the iterative part (DGESVJ) is */
946 /* > left to the implementer on a particular machine. */
947 /* > The rank revealing QR factorization (in this code: DGEQP3) should be */
948 /* > implemented as in [3]. We have a new version of DGEQP3 under development */
949 /* > that is more robust than the current one in LAPACK, with a cleaner cut in */
950 /* > rank deficient cases. It will be available in the SIGMA library [4]. */
951 /* > If M is much larger than N, it is obvious that the initial QRF with */
952 /* > column pivoting can be preprocessed by the QRF without pivoting. That */
953 /* > well known trick is not used in DGEJSV because in some cases heavy row */
954 /* > weighting can be treated with complete pivoting. The overhead in cases */
955 /* > M much larger than N is then only due to pivoting, but the benefits in */
956 /* > terms of accuracy have prevailed. The implementer/user can incorporate */
957 /* > this extra QRF step easily. The implementer can also improve data movement */
958 /* > (matrix transpose, matrix copy, matrix transposed copy) - this */
959 /* > implementation of DGEJSV uses only the simplest, naive data movement. */
962 /* > \par Contributors: */
963 /* ================== */
965 /* > Zlatko Drmac (Zagreb, Croatia) and Kresimir Veselic (Hagen, Germany) */
967 /* > \par References: */
968 /* ================ */
972 /* > [1] Z. Drmac and K. Veselic: New fast and accurate Jacobi SVD algorithm I. */
973 /* > SIAM J. Matrix Anal. Appl. Vol. 35, No. 2 (2008), pp. 1322-1342. */
974 /* > LAPACK Working note 169. */
975 /* > [2] Z. Drmac and K. Veselic: New fast and accurate Jacobi SVD algorithm II. */
976 /* > SIAM J. Matrix Anal. Appl. Vol. 35, No. 2 (2008), pp. 1343-1362. */
977 /* > LAPACK Working note 170. */
978 /* > [3] Z. Drmac and Z. Bujanovic: On the failure of rank-revealing QR */
979 /* > factorization software - a case study. */
980 /* > ACM Trans. Math. Softw. Vol. 35, No 2 (2008), pp. 1-28. */
981 /* > LAPACK Working note 176. */
982 /* > [4] Z. Drmac: SIGMA - mathematical software library for accurate SVD, PSV, */
983 /* > QSVD, (H,K)-SVD computations. */
984 /* > Department of Mathematics, University of Zagreb, 2008. */
987 /* > \par Bugs, examples and comments: */
988 /* ================================= */
990 /* > Please report all bugs and send interesting examples and/or comments to */
991 /* > drmac@math.hr. Thank you. */
993 /* ===================================================================== */
994 /* Subroutine */ int dgejsv_(char *joba, char *jobu, char *jobv, char *jobr,
995 char *jobt, char *jobp, integer *m, integer *n, doublereal *a,
996 integer *lda, doublereal *sva, doublereal *u, integer *ldu,
997 doublereal *v, integer *ldv, doublereal *work, integer *lwork,
998 integer *iwork, integer *info)
1000 /* System generated locals */
1001 integer a_dim1, a_offset, u_dim1, u_offset, v_dim1, v_offset, i__1, i__2,
1002 i__3, i__4, i__5, i__6, i__7, i__8, i__9, i__10, i__11, i__12;
1003 doublereal d__1, d__2, d__3, d__4;
1005 /* Local variables */
1007 doublereal aapp, aaqq;
1010 extern doublereal dnrm2_(integer *, doublereal *, integer *);
1014 extern /* Subroutine */ int dscal_(integer *, doublereal *, doublereal *,
1016 extern logical lsame_(char *, char *);
1017 doublereal small, entra, sfmin;
1019 extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
1020 doublereal *, integer *), dswap_(integer *, doublereal *, integer
1021 *, doublereal *, integer *);
1024 extern /* Subroutine */ int dtrsm_(char *, char *, char *, char *,
1025 integer *, integer *, doublereal *, doublereal *, integer *,
1026 doublereal *, integer *);
1029 extern /* Subroutine */ int dgeqp3_(integer *, integer *, doublereal *,
1030 integer *, integer *, doublereal *, doublereal *, integer *,
1032 doublereal condr1, condr2, uscal1, uscal2;
1033 logical l2kill, l2rank, l2tran, l2pert;
1034 extern doublereal dlamch_(char *);
1036 extern /* Subroutine */ int dgelqf_(integer *, integer *, doublereal *,
1037 integer *, doublereal *, doublereal *, integer *, integer *);
1038 extern integer idamax_(integer *, doublereal *, integer *);
1040 extern /* Subroutine */ int dlascl_(char *, integer *, integer *,
1041 doublereal *, doublereal *, integer *, integer *, doublereal *,
1042 integer *, integer *);
1046 extern /* Subroutine */ int dgeqrf_(integer *, integer *, doublereal *,
1047 integer *, doublereal *, doublereal *, integer *, integer *);
1049 extern /* Subroutine */ int dlacpy_(char *, integer *, integer *,
1050 doublereal *, integer *, doublereal *, integer *),
1051 dlaset_(char *, integer *, integer *, doublereal *, doublereal *,
1052 doublereal *, integer *), xerbla_(char *, integer *, ftnlen);
1054 extern /* Subroutine */ int dpocon_(char *, integer *, doublereal *,
1055 integer *, doublereal *, doublereal *, doublereal *, integer *,
1056 integer *), dgesvj_(char *, char *, char *, integer *,
1057 integer *, doublereal *, integer *, doublereal *, integer *,
1058 doublereal *, integer *, doublereal *, integer *, integer *), dlassq_(integer *, doublereal *, integer
1059 *, doublereal *, doublereal *), dlaswp_(integer *, doublereal *,
1060 integer *, integer *, integer *, integer *, integer *);
1063 extern /* Subroutine */ int dorgqr_(integer *, integer *, integer *,
1064 doublereal *, integer *, doublereal *, doublereal *, integer *,
1065 integer *), dormlq_(char *, char *, integer *, integer *, integer
1066 *, doublereal *, integer *, doublereal *, doublereal *, integer *,
1067 doublereal *, integer *, integer *);
1070 extern /* Subroutine */ int dormqr_(char *, char *, integer *, integer *,
1071 integer *, doublereal *, integer *, doublereal *, doublereal *,
1072 integer *, doublereal *, integer *, integer *);
1073 logical transp, rowpiv;
1074 doublereal big, cond_ok__, xsc, big1;
1075 integer warning, numrank;
1078 /* -- LAPACK computational routine (version 3.7.1) -- */
1079 /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
1080 /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
1084 /* =========================================================================== */
1088 /* Test the input arguments */
1090 /* Parameter adjustments */
1093 a_offset = 1 + a_dim1 * 1;
1096 u_offset = 1 + u_dim1 * 1;
1099 v_offset = 1 + v_dim1 * 1;
1105 lsvec = lsame_(jobu, "U") || lsame_(jobu, "F");
1106 jracc = lsame_(jobv, "J");
1107 rsvec = lsame_(jobv, "V") || jracc;
1108 rowpiv = lsame_(joba, "F") || lsame_(joba, "G");
1109 l2rank = lsame_(joba, "R");
1110 l2aber = lsame_(joba, "A");
1111 errest = lsame_(joba, "E") || lsame_(joba, "G");
1112 l2tran = lsame_(jobt, "T");
1113 l2kill = lsame_(jobr, "R");
1114 defr = lsame_(jobr, "N");
1115 l2pert = lsame_(jobp, "P");
1117 if (! (rowpiv || l2rank || l2aber || errest || lsame_(joba, "C"))) {
1119 } else if (! (lsvec || lsame_(jobu, "N") || lsame_(
1122 } else if (! (rsvec || lsame_(jobv, "N") || lsame_(
1123 jobv, "W")) || jracc && ! lsvec) {
1125 } else if (! (l2kill || defr)) {
1127 } else if (! (l2tran || lsame_(jobt, "N"))) {
1129 } else if (! (l2pert || lsame_(jobp, "N"))) {
1131 } else if (*m < 0) {
1133 } else if (*n < 0 || *n > *m) {
1135 } else if (*lda < *m) {
1137 } else if (lsvec && *ldu < *m) {
1139 } else if (rsvec && *ldv < *n) {
1141 } else /* if(complicated condition) */ {
1143 i__1 = 7, i__2 = (*n << 2) + 1, i__1 = f2cmax(i__1,i__2), i__2 = (*m <<
1146 i__3 = 7, i__4 = (*n << 2) + *n * *n, i__3 = f2cmax(i__3,i__4), i__4 = (*
1149 i__5 = 7, i__6 = (*m << 1) + *n, i__5 = f2cmax(i__5,i__6), i__6 = (*n <<
1152 i__7 = 7, i__8 = (*m << 1) + *n, i__7 = f2cmax(i__7,i__8), i__8 = (*n <<
1155 i__9 = (*m << 1) + *n, i__10 = *n * 6 + (*n << 1) * *n;
1157 i__11 = (*m << 1) + *n, i__12 = (*n << 2) + *n * *n, i__11 = f2cmax(
1158 i__11,i__12), i__12 = (*n << 1) + *n * *n + 6;
1159 if (! (lsvec || rsvec || errest) && *lwork < f2cmax(i__1,i__2) || ! (
1160 lsvec || rsvec) && errest && *lwork < f2cmax(i__3,i__4) || lsvec
1161 && ! rsvec && *lwork < f2cmax(i__5,i__6) || rsvec && ! lsvec && *
1162 lwork < f2cmax(i__7,i__8) || lsvec && rsvec && ! jracc && *lwork
1163 < f2cmax(i__9,i__10) || lsvec && rsvec && jracc && *lwork < f2cmax(
1175 xerbla_("DGEJSV", &i__1, (ftnlen)6);
1179 /* Quick return for void matrix (Y3K safe) */
1181 if (*m == 0 || *n == 0) {
1195 /* Determine whether the matrix U should be M x N or M x M */
1199 if (lsame_(jobu, "F")) {
1204 /* Set numerical parameters */
1206 /* ! NOTE: Make sure DLAMCH() does not fail on the target architecture. */
1208 epsln = dlamch_("Epsilon");
1209 sfmin = dlamch_("SafeMinimum");
1210 small = sfmin / epsln;
1212 /* BIG = ONE / SFMIN */
1214 /* Initialize SVA(1:N) = diag( ||A e_i||_2 )_1^N */
1216 /* (!) If necessary, scale SVA() to protect the largest norm from */
1217 /* overflow. It is possible that this scaling pushes the smallest */
1218 /* column norm left from the underflow threshold (extreme case). */
1220 scalem = 1. / sqrt((doublereal) (*m) * (doublereal) (*n));
1224 for (p = 1; p <= i__1; ++p) {
1227 dlassq_(m, &a[p * a_dim1 + 1], &c__1, &aapp, &aaqq);
1231 xerbla_("DGEJSV", &i__2, (ftnlen)6);
1235 if (aapp < big / aaqq && noscal) {
1236 sva[p] = aapp * aaqq;
1239 sva[p] = aapp * (aaqq * scalem);
1243 dscal_(&i__2, &scalem, &sva[1], &c__1);
1256 for (p = 1; p <= i__1; ++p) {
1258 d__1 = aapp, d__2 = sva[p];
1259 aapp = f2cmax(d__1,d__2);
1262 d__1 = aaqq, d__2 = sva[p];
1263 aaqq = f2cmin(d__1,d__2);
1268 /* Quick return for zero M x N matrix */
1272 dlaset_("G", m, &n1, &c_b34, &c_b35, &u[u_offset], ldu)
1276 dlaset_("G", n, n, &c_b34, &c_b35, &v[v_offset], ldv);
1283 if (lsvec && rsvec) {
1297 /* Issue warning if denormalized column norms detected. Override the */
1298 /* high relative accuracy request. Issue licence to kill columns */
1299 /* (set them to zero) whose norm is less than sigma_max / BIG (roughly). */
1302 if (aaqq <= sfmin) {
1308 /* Quick return for one-column matrix */
1313 dlascl_("G", &c__0, &c__0, &sva[1], &scalem, m, &c__1, &a[a_dim1
1315 dlacpy_("A", m, &c__1, &a[a_offset], lda, &u[u_offset], ldu);
1316 /* computing all M left singular vectors of the M x 1 matrix */
1319 dgeqrf_(m, n, &u[u_offset], ldu, &work[1], &work[*n + 1], &
1322 dorgqr_(m, &n1, &c__1, &u[u_offset], ldu, &work[1], &work[*n
1323 + 1], &i__1, &ierr);
1324 dcopy_(m, &a[a_dim1 + 1], &c__1, &u[u_dim1 + 1], &c__1);
1330 if (sva[1] < big * scalem) {
1334 work[1] = 1. / scalem;
1338 if (sva[1] / scalem >= sfmin) {
1351 if (lsvec && rsvec) {
1364 l2tran = l2tran && *m == *n;
1368 if (rowpiv || l2tran) {
1370 /* Compute the row norms, needed to determine row pivoting sequence */
1371 /* (in the case of heavily row weighted A, row pivoting is strongly */
1372 /* advised) and to collect information needed to compare the */
1373 /* structures of A * A^t and A^t * A (in the case L2TRAN.EQ..TRUE.). */
1377 for (p = 1; p <= i__1; ++p) {
1380 dlassq_(n, &a[p + a_dim1], lda, &xsc, &temp1);
1381 /* DLASSQ gets both the ell_2 and the ell_infinity norm */
1382 /* in one pass through the vector */
1383 work[*m + *n + p] = xsc * scalem;
1384 work[*n + p] = xsc * (scalem * sqrt(temp1));
1386 d__1 = aatmax, d__2 = work[*n + p];
1387 aatmax = f2cmax(d__1,d__2);
1388 if (work[*n + p] != 0.) {
1390 d__1 = aatmin, d__2 = work[*n + p];
1391 aatmin = f2cmin(d__1,d__2);
1397 for (p = 1; p <= i__1; ++p) {
1398 work[*m + *n + p] = scalem * (d__1 = a[p + idamax_(n, &a[p +
1399 a_dim1], lda) * a_dim1], abs(d__1));
1401 d__1 = aatmax, d__2 = work[*m + *n + p];
1402 aatmax = f2cmax(d__1,d__2);
1404 d__1 = aatmin, d__2 = work[*m + *n + p];
1405 aatmin = f2cmin(d__1,d__2);
1412 /* For square matrix A try to determine whether A^t would be better */
1413 /* input for the preconditioned Jacobi SVD, with faster convergence. */
1414 /* The decision is based on an O(N) function of the vector of column */
1415 /* and row norms of A, based on the Shannon entropy. This should give */
1416 /* the right choice in most cases when the difference actually matters. */
1417 /* It may fail and pick the slower converging side. */
1425 dlassq_(n, &sva[1], &c__1, &xsc, &temp1);
1430 for (p = 1; p <= i__1; ++p) {
1431 /* Computing 2nd power */
1432 d__1 = sva[p] / xsc;
1433 big1 = d__1 * d__1 * temp1;
1435 entra += big1 * log(big1);
1439 entra = -entra / log((doublereal) (*n));
1441 /* Now, SVA().^2/Trace(A^t * A) is a point in the probability simplex. */
1442 /* It is derived from the diagonal of A^t * A. Do the same with the */
1443 /* diagonal of A * A^t, compute the entropy of the corresponding */
1444 /* probability distribution. Note that A * A^t and A^t * A have the */
1449 for (p = *n + 1; p <= i__1; ++p) {
1450 /* Computing 2nd power */
1451 d__1 = work[p] / xsc;
1452 big1 = d__1 * d__1 * temp1;
1454 entrat += big1 * log(big1);
1458 entrat = -entrat / log((doublereal) (*m));
1460 /* Analyze the entropies and decide A or A^t. Smaller entropy */
1461 /* usually means better input for the algorithm. */
1463 transp = entrat < entra;
1465 /* If A^t is better than A, transpose A. */
1468 /* In an optimal implementation, this trivial transpose */
1469 /* should be replaced with faster transpose. */
1471 for (p = 1; p <= i__1; ++p) {
1473 for (q = p + 1; q <= i__2; ++q) {
1474 temp1 = a[q + p * a_dim1];
1475 a[q + p * a_dim1] = a[p + q * a_dim1];
1476 a[p + q * a_dim1] = temp1;
1482 for (p = 1; p <= i__1; ++p) {
1483 work[*m + *n + p] = sva[p];
1484 sva[p] = work[*n + p];
1506 /* Scale the matrix so that its maximal singular value remains less */
1507 /* than DSQRT(BIG) -- the matrix is scaled so that its maximal column */
1508 /* has Euclidean norm equal to DSQRT(BIG/N). The only reason to keep */
1509 /* DSQRT(BIG) instead of BIG is the fact that DGEJSV uses LAPACK and */
1510 /* BLAS routines that, in some implementations, are not capable of */
1511 /* working in the full interval [SFMIN,BIG] and that they may provoke */
1512 /* overflows in the intermediate results. If the singular values spread */
1513 /* from SFMIN to BIG, then DGESVJ will compute them. So, in that case, */
1514 /* one should use DGESVJ instead of DGEJSV. */
1517 temp1 = sqrt(big / (doublereal) (*n));
1519 dlascl_("G", &c__0, &c__0, &aapp, &temp1, n, &c__1, &sva[1], n, &ierr);
1520 if (aaqq > aapp * sfmin) {
1521 aaqq = aaqq / aapp * temp1;
1523 aaqq = aaqq * temp1 / aapp;
1526 dlascl_("G", &c__0, &c__0, &aapp, &temp1, m, n, &a[a_offset], lda, &ierr);
1528 /* To undo scaling at the end of this procedure, multiply the */
1529 /* computed singular values with USCAL2 / USCAL1. */
1535 /* L2KILL enforces computation of nonzero singular values in */
1536 /* the restricted range of condition number of the initial A, */
1537 /* sigma_max(A) / sigma_min(A) approx. DSQRT(BIG)/DSQRT(SFMIN). */
1542 /* Now, if the condition number of A is too big, */
1543 /* sigma_max(A) / sigma_min(A) .GT. DSQRT(BIG/N) * EPSLN / SFMIN, */
1544 /* as a precaution measure, the full SVD is computed using DGESVJ */
1545 /* with accumulated Jacobi rotations. This provides numerically */
1546 /* more robust computation, at the cost of slightly increased run */
1547 /* time. Depending on the concrete implementation of BLAS and LAPACK */
1548 /* (i.e. how they behave in presence of extreme ill-conditioning) the */
1549 /* implementer may decide to remove this switch. */
1550 if (aaqq < sqrt(sfmin) && lsvec && rsvec) {
1557 for (p = 1; p <= i__1; ++p) {
1559 dlaset_("A", m, &c__1, &c_b34, &c_b34, &a[p * a_dim1 + 1],
1567 /* Preconditioning using QR factorization with pivoting */
1570 /* Optional row permutation (Bjoerck row pivoting): */
1571 /* A result by Cox and Higham shows that the Bjoerck's */
1572 /* row pivoting combined with standard column pivoting */
1573 /* has similar effect as Powell-Reid complete pivoting. */
1574 /* The ell-infinity norms of A are made nonincreasing. */
1576 for (p = 1; p <= i__1; ++p) {
1578 q = idamax_(&i__2, &work[*m + *n + p], &c__1) + p - 1;
1579 iwork[(*n << 1) + p] = q;
1581 temp1 = work[*m + *n + p];
1582 work[*m + *n + p] = work[*m + *n + q];
1583 work[*m + *n + q] = temp1;
1588 dlaswp_(n, &a[a_offset], lda, &c__1, &i__1, &iwork[(*n << 1) + 1], &
1592 /* End of the preparation phase (scaling, optional sorting and */
1593 /* transposing, optional flushing of small columns). */
1595 /* Preconditioning */
1597 /* If the full SVD is needed, the right singular vectors are computed */
1598 /* from a matrix equation, and for that we need theoretical analysis */
1599 /* of the Businger-Golub pivoting. So we use DGEQP3 as the first RR QRF. */
1600 /* In all other cases the first RR QRF can be chosen by other criteria */
1601 /* (eg speed by replacing global with restricted window pivoting, such */
1602 /* as in SGEQPX from TOMS # 782). Good results will be obtained using */
1603 /* SGEQPX with properly (!) chosen numerical parameters. */
1604 /* Any improvement of DGEQP3 improves overall performance of DGEJSV. */
1606 /* A * P1 = Q1 * [ R1^t 0]^t: */
1608 for (p = 1; p <= i__1; ++p) {
1613 dgeqp3_(m, n, &a[a_offset], lda, &iwork[1], &work[1], &work[*n + 1], &
1616 /* The upper triangular matrix R1 from the first QRF is inspected for */
1617 /* rank deficiency and possibilities for deflation, or possible */
1618 /* ill-conditioning. Depending on the user specified flag L2RANK, */
1619 /* the procedure explores possibilities to reduce the numerical */
1620 /* rank by inspecting the computed upper triangular factor. If */
1621 /* L2RANK or L2ABER are up, then DGEJSV will compute the SVD of */
1622 /* A + dA, where ||dA|| <= f(M,N)*EPSLN. */
1626 /* Standard absolute error bound suffices. All sigma_i with */
1627 /* sigma_i < N*EPSLN*||A|| are flushed to zero. This is an */
1628 /* aggressive enforcement of lower numerical rank by introducing a */
1629 /* backward error of the order of N*EPSLN*||A||. */
1630 temp1 = sqrt((doublereal) (*n)) * epsln;
1632 for (p = 2; p <= i__1; ++p) {
1633 if ((d__2 = a[p + p * a_dim1], abs(d__2)) >= temp1 * (d__1 = a[
1634 a_dim1 + 1], abs(d__1))) {
1643 } else if (l2rank) {
1644 /* Sudden drop on the diagonal of R1 is used as the criterion for */
1645 /* close-to-rank-deficient. */
1646 temp1 = sqrt(sfmin);
1648 for (p = 2; p <= i__1; ++p) {
1649 if ((d__2 = a[p + p * a_dim1], abs(d__2)) < epsln * (d__1 = a[p -
1650 1 + (p - 1) * a_dim1], abs(d__1)) || (d__3 = a[p + p *
1651 a_dim1], abs(d__3)) < small || l2kill && (d__4 = a[p + p *
1652 a_dim1], abs(d__4)) < temp1) {
1662 /* The goal is high relative accuracy. However, if the matrix */
1663 /* has high scaled condition number the relative accuracy is in */
1664 /* general not feasible. Later on, a condition number estimator */
1665 /* will be deployed to estimate the scaled condition number. */
1666 /* Here we just remove the underflowed part of the triangular */
1667 /* factor. This prevents the situation in which the code is */
1668 /* working hard to get the accuracy not warranted by the data. */
1669 temp1 = sqrt(sfmin);
1671 for (p = 2; p <= i__1; ++p) {
1672 if ((d__1 = a[p + p * a_dim1], abs(d__1)) < small || l2kill && (
1673 d__2 = a[p + p * a_dim1], abs(d__2)) < temp1) {
1688 for (p = 2; p <= i__1; ++p) {
1689 temp1 = (d__1 = a[p + p * a_dim1], abs(d__1)) / sva[iwork[p]];
1690 maxprj = f2cmin(maxprj,temp1);
1693 /* Computing 2nd power */
1695 if (d__1 * d__1 >= 1. - (doublereal) (*n) * epsln) {
1708 dlacpy_("U", n, n, &a[a_offset], lda, &v[v_offset], ldv);
1710 for (p = 1; p <= i__1; ++p) {
1711 temp1 = sva[iwork[p]];
1713 dscal_(&p, &d__1, &v[p * v_dim1 + 1], &c__1);
1716 dpocon_("U", n, &v[v_offset], ldv, &c_b35, &temp1, &work[*n +
1717 1], &iwork[(*n << 1) + *m + 1], &ierr);
1719 dlacpy_("U", n, n, &a[a_offset], lda, &u[u_offset], ldu);
1721 for (p = 1; p <= i__1; ++p) {
1722 temp1 = sva[iwork[p]];
1724 dscal_(&p, &d__1, &u[p * u_dim1 + 1], &c__1);
1727 dpocon_("U", n, &u[u_offset], ldu, &c_b35, &temp1, &work[*n +
1728 1], &iwork[(*n << 1) + *m + 1], &ierr);
1730 dlacpy_("U", n, n, &a[a_offset], lda, &work[*n + 1], n);
1732 for (p = 1; p <= i__1; ++p) {
1733 temp1 = sva[iwork[p]];
1735 dscal_(&p, &d__1, &work[*n + (p - 1) * *n + 1], &c__1);
1738 dpocon_("U", n, &work[*n + 1], n, &c_b35, &temp1, &work[*n + *
1739 n * *n + 1], &iwork[(*n << 1) + *m + 1], &ierr);
1741 sconda = 1. / sqrt(temp1);
1742 /* SCONDA is an estimate of DSQRT(||(R^t * R)^(-1)||_1). */
1743 /* N^(-1/4) * SCONDA <= ||R^(-1)||_2 <= N^(1/4) * SCONDA */
1749 l2pert = l2pert && (d__1 = a[a_dim1 + 1] / a[nr + nr * a_dim1], abs(d__1))
1751 /* If there is no violent scaling, artificial perturbation is not needed. */
1755 if (! (rsvec || lsvec)) {
1757 /* Singular Values only */
1761 i__1 = f2cmin(i__2,nr);
1762 for (p = 1; p <= i__1; ++p) {
1764 dcopy_(&i__2, &a[p + (p + 1) * a_dim1], lda, &a[p + 1 + p *
1769 /* The following two DO-loops introduce small relative perturbation */
1770 /* into the strict upper triangle of the lower triangular matrix. */
1771 /* Small entries below the main diagonal are also changed. */
1772 /* This modification is useful if the computing environment does not */
1773 /* provide/allow FLUSH TO ZERO underflow, for it prevents many */
1774 /* annoying denormalized numbers in case of strongly scaled matrices. */
1775 /* The perturbation is structured so that it does not introduce any */
1776 /* new perturbation of the singular values, and it does not destroy */
1777 /* the job done by the preconditioner. */
1778 /* The licence for this perturbation is in the variable L2PERT, which */
1779 /* should be .FALSE. if FLUSH TO ZERO underflow is active. */
1784 /* XSC = DSQRT(SMALL) */
1785 xsc = epsln / (doublereal) (*n);
1787 for (q = 1; q <= i__1; ++q) {
1788 temp1 = xsc * (d__1 = a[q + q * a_dim1], abs(d__1));
1790 for (p = 1; p <= i__2; ++p) {
1791 if (p > q && (d__1 = a[p + q * a_dim1], abs(d__1)) <=
1793 a[p + q * a_dim1] = d_sign(&temp1, &a[p + q *
1803 dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &a[(a_dim1 << 1) +
1809 dgeqrf_(n, &nr, &a[a_offset], lda, &work[1], &work[*n + 1], &i__1,
1813 for (p = 1; p <= i__1; ++p) {
1815 dcopy_(&i__2, &a[p + (p + 1) * a_dim1], lda, &a[p + 1 + p *
1822 /* Row-cyclic Jacobi SVD algorithm with column pivoting */
1824 /* to drown denormals */
1826 /* XSC = DSQRT(SMALL) */
1827 xsc = epsln / (doublereal) (*n);
1829 for (q = 1; q <= i__1; ++q) {
1830 temp1 = xsc * (d__1 = a[q + q * a_dim1], abs(d__1));
1832 for (p = 1; p <= i__2; ++p) {
1833 if (p > q && (d__1 = a[p + q * a_dim1], abs(d__1)) <=
1835 a[p + q * a_dim1] = d_sign(&temp1, &a[p + q * a_dim1])
1845 dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &a[(a_dim1 << 1) + 1],
1849 /* triangular matrix (plus perturbation which is ignored in */
1850 /* the part which destroys triangular form (confusing?!)) */
1852 dgesvj_("L", "NoU", "NoV", &nr, &nr, &a[a_offset], lda, &sva[1], n, &
1853 v[v_offset], ldv, &work[1], lwork, info);
1856 numrank = i_dnnt(&work[2]);
1859 } else if (rsvec && ! lsvec) {
1861 /* -> Singular Values and Right Singular Vectors <- */
1866 for (p = 1; p <= i__1; ++p) {
1868 dcopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1], &
1874 dlaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 << 1) +
1877 dgesvj_("L", "U", "N", n, &nr, &v[v_offset], ldv, &sva[1], &nr, &
1878 a[a_offset], lda, &work[1], lwork, info);
1880 numrank = i_dnnt(&work[2]);
1883 /* accumulated product of Jacobi rotations, three are perfect ) */
1887 dlaset_("Lower", &i__1, &i__2, &c_b34, &c_b34, &a[a_dim1 + 2],
1890 dgelqf_(&nr, n, &a[a_offset], lda, &work[1], &work[*n + 1], &i__1,
1892 dlacpy_("Lower", &nr, &nr, &a[a_offset], lda, &v[v_offset], ldv);
1895 dlaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 << 1) +
1897 i__1 = *lwork - (*n << 1);
1898 dgeqrf_(&nr, &nr, &v[v_offset], ldv, &work[*n + 1], &work[(*n <<
1899 1) + 1], &i__1, &ierr);
1901 for (p = 1; p <= i__1; ++p) {
1903 dcopy_(&i__2, &v[p + p * v_dim1], ldv, &v[p + p * v_dim1], &
1909 dlaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 << 1) +
1912 dgesvj_("Lower", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[1], &
1913 nr, &u[u_offset], ldu, &work[*n + 1], lwork, info);
1914 scalem = work[*n + 1];
1915 numrank = i_dnnt(&work[*n + 2]);
1918 dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr + 1 + v_dim1],
1921 dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr + 1) * v_dim1
1925 dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr + 1 + (nr +
1930 dormlq_("Left", "Transpose", n, n, &nr, &a[a_offset], lda, &work[
1931 1], &v[v_offset], ldv, &work[*n + 1], &i__1, &ierr);
1936 for (p = 1; p <= i__1; ++p) {
1937 dcopy_(n, &v[p + v_dim1], ldv, &a[iwork[p] + a_dim1], lda);
1940 dlacpy_("All", n, n, &a[a_offset], lda, &v[v_offset], ldv);
1943 dlacpy_("All", n, n, &v[v_offset], ldv, &u[u_offset], ldu);
1946 } else if (lsvec && ! rsvec) {
1949 /* Jacobi rotations in the Jacobi iterations. */
1951 for (p = 1; p <= i__1; ++p) {
1953 dcopy_(&i__2, &a[p + p * a_dim1], lda, &u[p + p * u_dim1], &c__1);
1958 dlaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &u[(u_dim1 << 1) + 1],
1961 i__1 = *lwork - (*n << 1);
1962 dgeqrf_(n, &nr, &u[u_offset], ldu, &work[*n + 1], &work[(*n << 1) + 1]
1966 for (p = 1; p <= i__1; ++p) {
1968 dcopy_(&i__2, &u[p + (p + 1) * u_dim1], ldu, &u[p + 1 + p *
1974 dlaset_("Upper", &i__1, &i__2, &c_b34, &c_b34, &u[(u_dim1 << 1) + 1],
1978 dgesvj_("Lower", "U", "N", &nr, &nr, &u[u_offset], ldu, &sva[1], &nr,
1979 &a[a_offset], lda, &work[*n + 1], &i__1, info);
1980 scalem = work[*n + 1];
1981 numrank = i_dnnt(&work[*n + 2]);
1985 dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &u[nr + 1 + u_dim1], ldu);
1988 dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &u[(nr + 1) * u_dim1
1992 dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &u[nr + 1 + (nr +
1998 dormqr_("Left", "No Tr", m, &n1, n, &a[a_offset], lda, &work[1], &u[
1999 u_offset], ldu, &work[*n + 1], &i__1, &ierr);
2003 dlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[(*n << 1) +
2008 for (p = 1; p <= i__1; ++p) {
2009 xsc = 1. / dnrm2_(m, &u[p * u_dim1 + 1], &c__1);
2010 dscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
2015 dlacpy_("All", n, n, &u[u_offset], ldu, &v[v_offset], ldv);
2025 /* Second Preconditioning Step (QRF [with pivoting]) */
2026 /* Note that the composition of TRANSPOSE, QRF and TRANSPOSE is */
2027 /* equivalent to an LQF CALL. Since in many libraries the QRF */
2028 /* seems to be better optimized than the LQF, we do explicit */
2029 /* transpose and use the QRF. This is subject to changes in an */
2030 /* optimized implementation of DGEJSV. */
2033 for (p = 1; p <= i__1; ++p) {
2035 dcopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1],
2040 /* denormals in the second QR factorization, where they are */
2041 /* as good as zeros. This is done to avoid painfully slow */
2042 /* computation with denormals. The relative size of the perturbation */
2043 /* is a parameter that can be changed by the implementer. */
2044 /* This perturbation device will be obsolete on machines with */
2045 /* properly implemented arithmetic. */
2046 /* To switch it off, set L2PERT=.FALSE. To remove it from the */
2047 /* code, remove the action under L2PERT=.TRUE., leave the ELSE part. */
2048 /* The following two loops should be blocked and fused with the */
2049 /* transposed copy above. */
2054 for (q = 1; q <= i__1; ++q) {
2055 temp1 = xsc * (d__1 = v[q + q * v_dim1], abs(d__1));
2057 for (p = 1; p <= i__2; ++p) {
2058 if (p > q && (d__1 = v[p + q * v_dim1], abs(d__1))
2059 <= temp1 || p < q) {
2060 v[p + q * v_dim1] = d_sign(&temp1, &v[p + q *
2064 v[p + q * v_dim1] = -v[p + q * v_dim1];
2073 dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 <<
2077 /* Estimate the row scaled condition number of R1 */
2078 /* (If R1 is rectangular, N > NR, then the condition number */
2079 /* of the leading NR x NR submatrix is estimated.) */
2081 dlacpy_("L", &nr, &nr, &v[v_offset], ldv, &work[(*n << 1) + 1]
2084 for (p = 1; p <= i__1; ++p) {
2086 temp1 = dnrm2_(&i__2, &work[(*n << 1) + (p - 1) * nr + p],
2090 dscal_(&i__2, &d__1, &work[(*n << 1) + (p - 1) * nr + p],
2094 dpocon_("Lower", &nr, &work[(*n << 1) + 1], &nr, &c_b35, &
2095 temp1, &work[(*n << 1) + nr * nr + 1], &iwork[*m + (*
2096 n << 1) + 1], &ierr);
2097 condr1 = 1. / sqrt(temp1);
2098 /* R1 is OK for inverse <=> CONDR1 .LT. DBLE(N) */
2099 /* more conservative <=> CONDR1 .LT. DSQRT(DBLE(N)) */
2101 cond_ok__ = sqrt((doublereal) nr);
2102 /* [TP] COND_OK is a tuning parameter. */
2103 if (condr1 < cond_ok__) {
2104 /* implementation, this QRF should be implemented as the QRF */
2105 /* of a lower triangular matrix. */
2106 /* R1^t = Q2 * R2 */
2107 i__1 = *lwork - (*n << 1);
2108 dgeqrf_(n, &nr, &v[v_offset], ldv, &work[*n + 1], &work[(*
2109 n << 1) + 1], &i__1, &ierr);
2112 xsc = sqrt(small) / epsln;
2114 for (p = 2; p <= i__1; ++p) {
2116 for (q = 1; q <= i__2; ++q) {
2118 d__3 = (d__1 = v[p + p * v_dim1], abs(d__1)),
2119 d__4 = (d__2 = v[q + q * v_dim1], abs(
2121 temp1 = xsc * f2cmin(d__3,d__4);
2122 if ((d__1 = v[q + p * v_dim1], abs(d__1)) <=
2124 v[q + p * v_dim1] = d_sign(&temp1, &v[q +
2134 dlacpy_("A", n, &nr, &v[v_offset], ldv, &work[(*n <<
2139 for (p = 1; p <= i__1; ++p) {
2141 dcopy_(&i__2, &v[p + (p + 1) * v_dim1], ldv, &v[p + 1
2142 + p * v_dim1], &c__1);
2150 /* Note that windowed pivoting would be equally good */
2151 /* numerically, and more run-time efficient. So, in */
2152 /* an optimal implementation, the next call to DGEQP3 */
2153 /* should be replaced with eg. CALL SGEQPX (ACM TOMS #782) */
2154 /* with properly (carefully) chosen parameters. */
2156 /* R1^t * P2 = Q2 * R2 */
2158 for (p = 1; p <= i__1; ++p) {
2162 i__1 = *lwork - (*n << 1);
2163 dgeqp3_(n, &nr, &v[v_offset], ldv, &iwork[*n + 1], &work[*
2164 n + 1], &work[(*n << 1) + 1], &i__1, &ierr);
2165 /* * CALL DGEQRF( N, NR, V, LDV, WORK(N+1), WORK(2*N+1), */
2166 /* * $ LWORK-2*N, IERR ) */
2170 for (p = 2; p <= i__1; ++p) {
2172 for (q = 1; q <= i__2; ++q) {
2174 d__3 = (d__1 = v[p + p * v_dim1], abs(d__1)),
2175 d__4 = (d__2 = v[q + q * v_dim1], abs(
2177 temp1 = xsc * f2cmin(d__3,d__4);
2178 if ((d__1 = v[q + p * v_dim1], abs(d__1)) <=
2180 v[q + p * v_dim1] = d_sign(&temp1, &v[q +
2189 dlacpy_("A", n, &nr, &v[v_offset], ldv, &work[(*n << 1) +
2195 for (p = 2; p <= i__1; ++p) {
2197 for (q = 1; q <= i__2; ++q) {
2199 d__3 = (d__1 = v[p + p * v_dim1], abs(d__1)),
2200 d__4 = (d__2 = v[q + q * v_dim1], abs(
2202 temp1 = xsc * f2cmin(d__3,d__4);
2203 v[p + q * v_dim1] = -d_sign(&temp1, &v[q + p *
2212 dlaset_("L", &i__1, &i__2, &c_b34, &c_b34, &v[v_dim1
2215 /* Now, compute R2 = L3 * Q3, the LQ factorization. */
2216 i__1 = *lwork - (*n << 1) - *n * nr - nr;
2217 dgelqf_(&nr, &nr, &v[v_offset], ldv, &work[(*n << 1) + *n
2218 * nr + 1], &work[(*n << 1) + *n * nr + nr + 1], &
2220 dlacpy_("L", &nr, &nr, &v[v_offset], ldv, &work[(*n << 1)
2221 + *n * nr + nr + 1], &nr);
2223 for (p = 1; p <= i__1; ++p) {
2224 temp1 = dnrm2_(&p, &work[(*n << 1) + *n * nr + nr + p]
2227 dscal_(&p, &d__1, &work[(*n << 1) + *n * nr + nr + p],
2231 dpocon_("L", &nr, &work[(*n << 1) + *n * nr + nr + 1], &
2232 nr, &c_b35, &temp1, &work[(*n << 1) + *n * nr +
2233 nr + nr * nr + 1], &iwork[*m + (*n << 1) + 1], &
2235 condr2 = 1. / sqrt(temp1);
2237 if (condr2 >= cond_ok__) {
2238 /* (this overwrites the copy of R2, as it will not be */
2239 /* needed in this branch, but it does not overwrite the */
2240 /* Householder vectors of Q2.). */
2241 dlacpy_("U", &nr, &nr, &v[v_offset], ldv, &work[(*n <<
2243 /* WORK(2*N+N*NR+1:2*N+N*NR+N) */
2251 for (q = 2; q <= i__1; ++q) {
2252 temp1 = xsc * v[q + q * v_dim1];
2254 for (p = 1; p <= i__2; ++p) {
2255 /* V(p,q) = - DSIGN( TEMP1, V(q,p) ) */
2256 v[p + q * v_dim1] = -d_sign(&temp1, &v[p + q *
2265 dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 <<
2269 /* Second preconditioning finished; continue with Jacobi SVD */
2270 /* The input matrix is lower triangular. */
2272 /* Recover the right singular vectors as solution of a well */
2273 /* conditioned triangular matrix equation. */
2275 if (condr1 < cond_ok__) {
2277 i__1 = *lwork - (*n << 1) - *n * nr - nr;
2278 dgesvj_("L", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[
2279 1], &nr, &u[u_offset], ldu, &work[(*n << 1) + *n *
2280 nr + nr + 1], &i__1, info);
2281 scalem = work[(*n << 1) + *n * nr + nr + 1];
2282 numrank = i_dnnt(&work[(*n << 1) + *n * nr + nr + 2]);
2284 for (p = 1; p <= i__1; ++p) {
2285 dcopy_(&nr, &v[p * v_dim1 + 1], &c__1, &u[p * u_dim1
2287 dscal_(&nr, &sva[p], &v[p * v_dim1 + 1], &c__1);
2292 /* :)) .. best case, R1 is inverted. The solution of this matrix */
2293 /* equation is Q2*V2 = the product of the Jacobi rotations */
2294 /* used in DGESVJ, premultiplied with the orthogonal matrix */
2295 /* from the second QR factorization. */
2296 dtrsm_("L", "U", "N", "N", &nr, &nr, &c_b35, &a[
2297 a_offset], lda, &v[v_offset], ldv);
2299 /* is inverted to get the product of the Jacobi rotations */
2300 /* used in DGESVJ. The Q-factor from the second QR */
2301 /* factorization is then built in explicitly. */
2302 dtrsm_("L", "U", "T", "N", &nr, &nr, &c_b35, &work[(*
2303 n << 1) + 1], n, &v[v_offset], ldv);
2306 dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr +
2309 dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr +
2310 1) * v_dim1 + 1], ldv);
2313 dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr
2314 + 1 + (nr + 1) * v_dim1], ldv);
2316 i__1 = *lwork - (*n << 1) - *n * nr - nr;
2317 dormqr_("L", "N", n, n, &nr, &work[(*n << 1) + 1], n,
2318 &work[*n + 1], &v[v_offset], ldv, &work[(*n <<
2319 1) + *n * nr + nr + 1], &i__1, &ierr);
2322 } else if (condr2 < cond_ok__) {
2324 /* :) .. the input matrix A is very likely a relative of */
2325 /* the Kahan matrix :) */
2326 /* The matrix R2 is inverted. The solution of the matrix equation */
2327 /* is Q3^T*V3 = the product of the Jacobi rotations (applied to */
2328 /* the lower triangular L3 from the LQ factorization of */
2329 /* R2=L3*Q3), pre-multiplied with the transposed Q3. */
2330 i__1 = *lwork - (*n << 1) - *n * nr - nr;
2331 dgesvj_("L", "U", "N", &nr, &nr, &v[v_offset], ldv, &sva[
2332 1], &nr, &u[u_offset], ldu, &work[(*n << 1) + *n *
2333 nr + nr + 1], &i__1, info);
2334 scalem = work[(*n << 1) + *n * nr + nr + 1];
2335 numrank = i_dnnt(&work[(*n << 1) + *n * nr + nr + 2]);
2337 for (p = 1; p <= i__1; ++p) {
2338 dcopy_(&nr, &v[p * v_dim1 + 1], &c__1, &u[p * u_dim1
2340 dscal_(&nr, &sva[p], &u[p * u_dim1 + 1], &c__1);
2343 dtrsm_("L", "U", "N", "N", &nr, &nr, &c_b35, &work[(*n <<
2344 1) + 1], n, &u[u_offset], ldu);
2346 for (q = 1; q <= i__1; ++q) {
2348 for (p = 1; p <= i__2; ++p) {
2349 work[(*n << 1) + *n * nr + nr + iwork[*n + p]] =
2354 for (p = 1; p <= i__2; ++p) {
2355 u[p + q * u_dim1] = work[(*n << 1) + *n * nr + nr
2363 dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr + 1 +
2366 dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr + 1) *
2370 dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr + 1
2371 + (nr + 1) * v_dim1], ldv);
2373 i__1 = *lwork - (*n << 1) - *n * nr - nr;
2374 dormqr_("L", "N", n, n, &nr, &work[(*n << 1) + 1], n, &
2375 work[*n + 1], &v[v_offset], ldv, &work[(*n << 1)
2376 + *n * nr + nr + 1], &i__1, &ierr);
2378 /* Last line of defense. */
2379 /* #:( This is a rather pathological case: no scaled condition */
2380 /* improvement after two pivoted QR factorizations. Other */
2381 /* possibility is that the rank revealing QR factorization */
2382 /* or the condition estimator has failed, or the COND_OK */
2383 /* is set very close to ONE (which is unnecessary). Normally, */
2384 /* this branch should never be executed, but in rare cases of */
2385 /* failure of the RRQR or condition estimator, the last line of */
2386 /* defense ensures that DGEJSV completes the task. */
2387 /* Compute the full SVD of L3 using DGESVJ with explicit */
2388 /* accumulation of Jacobi rotations. */
2389 i__1 = *lwork - (*n << 1) - *n * nr - nr;
2390 dgesvj_("L", "U", "V", &nr, &nr, &v[v_offset], ldv, &sva[
2391 1], &nr, &u[u_offset], ldu, &work[(*n << 1) + *n *
2392 nr + nr + 1], &i__1, info);
2393 scalem = work[(*n << 1) + *n * nr + nr + 1];
2394 numrank = i_dnnt(&work[(*n << 1) + *n * nr + nr + 2]);
2397 dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr + 1 +
2400 dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr + 1) *
2404 dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr + 1
2405 + (nr + 1) * v_dim1], ldv);
2407 i__1 = *lwork - (*n << 1) - *n * nr - nr;
2408 dormqr_("L", "N", n, n, &nr, &work[(*n << 1) + 1], n, &
2409 work[*n + 1], &v[v_offset], ldv, &work[(*n << 1)
2410 + *n * nr + nr + 1], &i__1, &ierr);
2412 i__1 = *lwork - (*n << 1) - *n * nr - nr;
2413 dormlq_("L", "T", &nr, &nr, &nr, &work[(*n << 1) + 1], n,
2414 &work[(*n << 1) + *n * nr + 1], &u[u_offset], ldu,
2415 &work[(*n << 1) + *n * nr + nr + 1], &i__1, &
2418 for (q = 1; q <= i__1; ++q) {
2420 for (p = 1; p <= i__2; ++p) {
2421 work[(*n << 1) + *n * nr + nr + iwork[*n + p]] =
2426 for (p = 1; p <= i__2; ++p) {
2427 u[p + q * u_dim1] = work[(*n << 1) + *n * nr + nr
2436 /* Permute the rows of V using the (column) permutation from the */
2437 /* first QRF. Also, scale the columns to make them unit in */
2438 /* Euclidean norm. This applies to all cases. */
2440 temp1 = sqrt((doublereal) (*n)) * epsln;
2442 for (q = 1; q <= i__1; ++q) {
2444 for (p = 1; p <= i__2; ++p) {
2445 work[(*n << 1) + *n * nr + nr + iwork[p]] = v[p + q *
2450 for (p = 1; p <= i__2; ++p) {
2451 v[p + q * v_dim1] = work[(*n << 1) + *n * nr + nr + p]
2455 xsc = 1. / dnrm2_(n, &v[q * v_dim1 + 1], &c__1);
2456 if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
2457 dscal_(n, &xsc, &v[q * v_dim1 + 1], &c__1);
2461 /* At this moment, V contains the right singular vectors of A. */
2462 /* Next, assemble the left singular vector matrix U (M x N). */
2465 dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &u[nr + 1 +
2469 dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &u[(nr + 1) *
2473 dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &u[nr + 1
2474 + (nr + 1) * u_dim1], ldu);
2478 /* The Q matrix from the first QRF is built into the left singular */
2479 /* matrix U. This applies to all cases. */
2482 dormqr_("Left", "No_Tr", m, &n1, n, &a[a_offset], lda, &work[
2483 1], &u[u_offset], ldu, &work[*n + 1], &i__1, &ierr);
2484 /* The columns of U are normalized. The cost is O(M*N) flops. */
2485 temp1 = sqrt((doublereal) (*m)) * epsln;
2487 for (p = 1; p <= i__1; ++p) {
2488 xsc = 1. / dnrm2_(m, &u[p * u_dim1 + 1], &c__1);
2489 if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
2490 dscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
2495 /* If the initial QRF is computed with row pivoting, the left */
2496 /* singular vectors must be adjusted. */
2500 dlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[(*n
2506 /* the second QRF is not needed */
2508 dlacpy_("Upper", n, n, &a[a_offset], lda, &work[*n + 1], n);
2512 for (p = 2; p <= i__1; ++p) {
2513 temp1 = xsc * work[*n + (p - 1) * *n + p];
2515 for (q = 1; q <= i__2; ++q) {
2516 work[*n + (q - 1) * *n + p] = -d_sign(&temp1, &
2517 work[*n + (p - 1) * *n + q]);
2525 dlaset_("Lower", &i__1, &i__2, &c_b34, &c_b34, &work[*n +
2529 i__1 = *lwork - *n - *n * *n;
2530 dgesvj_("Upper", "U", "N", n, n, &work[*n + 1], n, &sva[1], n,
2531 &u[u_offset], ldu, &work[*n + *n * *n + 1], &i__1,
2534 scalem = work[*n + *n * *n + 1];
2535 numrank = i_dnnt(&work[*n + *n * *n + 2]);
2537 for (p = 1; p <= i__1; ++p) {
2538 dcopy_(n, &work[*n + (p - 1) * *n + 1], &c__1, &u[p *
2539 u_dim1 + 1], &c__1);
2540 dscal_(n, &sva[p], &work[*n + (p - 1) * *n + 1], &c__1);
2544 dtrsm_("Left", "Upper", "NoTrans", "No UD", n, n, &c_b35, &a[
2545 a_offset], lda, &work[*n + 1], n);
2547 for (p = 1; p <= i__1; ++p) {
2548 dcopy_(n, &work[*n + p], n, &v[iwork[p] + v_dim1], ldv);
2551 temp1 = sqrt((doublereal) (*n)) * epsln;
2553 for (p = 1; p <= i__1; ++p) {
2554 xsc = 1. / dnrm2_(n, &v[p * v_dim1 + 1], &c__1);
2555 if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
2556 dscal_(n, &xsc, &v[p * v_dim1 + 1], &c__1);
2561 /* Assemble the left singular vector matrix U (M x N). */
2565 dlaset_("A", &i__1, n, &c_b34, &c_b34, &u[*n + 1 + u_dim1]
2569 dlaset_("A", n, &i__1, &c_b34, &c_b34, &u[(*n + 1) *
2573 dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &u[*n + 1
2574 + (*n + 1) * u_dim1], ldu);
2578 dormqr_("Left", "No Tr", m, &n1, n, &a[a_offset], lda, &work[
2579 1], &u[u_offset], ldu, &work[*n + 1], &i__1, &ierr);
2580 temp1 = sqrt((doublereal) (*m)) * epsln;
2582 for (p = 1; p <= i__1; ++p) {
2583 xsc = 1. / dnrm2_(m, &u[p * u_dim1 + 1], &c__1);
2584 if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
2585 dscal_(m, &xsc, &u[p * u_dim1 + 1], &c__1);
2592 dlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[(*n
2598 /* end of the >> almost orthogonal case << in the full SVD */
2602 /* This branch deploys a preconditioned Jacobi SVD with explicitly */
2603 /* accumulated rotations. It is included as optional, mainly for */
2604 /* experimental purposes. It does perform well, and can also be used. */
2605 /* In this implementation, this branch will be automatically activated */
2606 /* if the condition number sigma_max(A) / sigma_min(A) is predicted */
2607 /* to be greater than the overflow threshold. This is because the */
2608 /* a posteriori computation of the singular vectors assumes robust */
2609 /* implementation of BLAS and some LAPACK procedures, capable of working */
2610 /* in presence of extreme values. Since that is not always the case, ... */
2613 for (p = 1; p <= i__1; ++p) {
2615 dcopy_(&i__2, &a[p + p * a_dim1], lda, &v[p + p * v_dim1], &
2621 xsc = sqrt(small / epsln);
2623 for (q = 1; q <= i__1; ++q) {
2624 temp1 = xsc * (d__1 = v[q + q * v_dim1], abs(d__1));
2626 for (p = 1; p <= i__2; ++p) {
2627 if (p > q && (d__1 = v[p + q * v_dim1], abs(d__1)) <=
2629 v[p + q * v_dim1] = d_sign(&temp1, &v[p + q *
2633 v[p + q * v_dim1] = -v[p + q * v_dim1];
2642 dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &v[(v_dim1 << 1) +
2645 i__1 = *lwork - (*n << 1);
2646 dgeqrf_(n, &nr, &v[v_offset], ldv, &work[*n + 1], &work[(*n << 1)
2647 + 1], &i__1, &ierr);
2648 dlacpy_("L", n, &nr, &v[v_offset], ldv, &work[(*n << 1) + 1], n);
2651 for (p = 1; p <= i__1; ++p) {
2653 dcopy_(&i__2, &v[p + p * v_dim1], ldv, &u[p + p * u_dim1], &
2658 xsc = sqrt(small / epsln);
2660 for (q = 2; q <= i__1; ++q) {
2662 for (p = 1; p <= i__2; ++p) {
2664 d__3 = (d__1 = u[p + p * u_dim1], abs(d__1)), d__4 = (
2665 d__2 = u[q + q * u_dim1], abs(d__2));
2666 temp1 = xsc * f2cmin(d__3,d__4);
2667 u[p + q * u_dim1] = -d_sign(&temp1, &u[q + p * u_dim1]
2676 dlaset_("U", &i__1, &i__2, &c_b34, &c_b34, &u[(u_dim1 << 1) +
2679 i__1 = *lwork - (*n << 1) - *n * nr;
2680 dgesvj_("G", "U", "V", &nr, &nr, &u[u_offset], ldu, &sva[1], n, &
2681 v[v_offset], ldv, &work[(*n << 1) + *n * nr + 1], &i__1,
2683 scalem = work[(*n << 1) + *n * nr + 1];
2684 numrank = i_dnnt(&work[(*n << 1) + *n * nr + 2]);
2687 dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &v[nr + 1 + v_dim1],
2690 dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &v[(nr + 1) * v_dim1
2694 dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &v[nr + 1 + (nr +
2697 i__1 = *lwork - (*n << 1) - *n * nr - nr;
2698 dormqr_("L", "N", n, n, &nr, &work[(*n << 1) + 1], n, &work[*n +
2699 1], &v[v_offset], ldv, &work[(*n << 1) + *n * nr + nr + 1]
2702 /* Permute the rows of V using the (column) permutation from the */
2703 /* first QRF. Also, scale the columns to make them unit in */
2704 /* Euclidean norm. This applies to all cases. */
2706 temp1 = sqrt((doublereal) (*n)) * epsln;
2708 for (q = 1; q <= i__1; ++q) {
2710 for (p = 1; p <= i__2; ++p) {
2711 work[(*n << 1) + *n * nr + nr + iwork[p]] = v[p + q *
2716 for (p = 1; p <= i__2; ++p) {
2717 v[p + q * v_dim1] = work[(*n << 1) + *n * nr + nr + p];
2720 xsc = 1. / dnrm2_(n, &v[q * v_dim1 + 1], &c__1);
2721 if (xsc < 1. - temp1 || xsc > temp1 + 1.) {
2722 dscal_(n, &xsc, &v[q * v_dim1 + 1], &c__1);
2727 /* At this moment, V contains the right singular vectors of A. */
2728 /* Next, assemble the left singular vector matrix U (M x N). */
2732 dlaset_("A", &i__1, &nr, &c_b34, &c_b34, &u[nr + 1 + u_dim1],
2736 dlaset_("A", &nr, &i__1, &c_b34, &c_b34, &u[(nr + 1) *
2740 dlaset_("A", &i__1, &i__2, &c_b34, &c_b35, &u[nr + 1 + (
2741 nr + 1) * u_dim1], ldu);
2746 dormqr_("Left", "No Tr", m, &n1, n, &a[a_offset], lda, &work[1], &
2747 u[u_offset], ldu, &work[*n + 1], &i__1, &ierr);
2751 dlaswp_(&n1, &u[u_offset], ldu, &c__1, &i__1, &iwork[(*n << 1)
2759 for (p = 1; p <= i__1; ++p) {
2760 dswap_(n, &u[p * u_dim1 + 1], &c__1, &v[p * v_dim1 + 1], &
2767 /* end of the full SVD */
2769 /* Undo scaling, if necessary (and possible) */
2771 if (uscal2 <= big / sva[1] * uscal1) {
2772 dlascl_("G", &c__0, &c__0, &uscal1, &uscal2, &nr, &c__1, &sva[1], n, &
2780 for (p = nr + 1; p <= i__1; ++p) {
2786 work[1] = uscal2 * scalem;
2791 if (lsvec && rsvec) {