14 typedef long long BLASLONG;
15 typedef unsigned long long BLASULONG;
17 typedef long BLASLONG;
18 typedef unsigned long BLASULONG;
22 typedef BLASLONG blasint;
24 #define blasabs(x) llabs(x)
26 #define blasabs(x) labs(x)
30 #define blasabs(x) abs(x)
33 typedef blasint integer;
35 typedef unsigned int uinteger;
36 typedef char *address;
37 typedef short int shortint;
39 typedef double doublereal;
40 typedef struct { real r, i; } complex;
41 typedef struct { doublereal r, i; } doublecomplex;
/* MSVC path: no C99 _Complex support, so use the _Fcomplex/_Dcomplex
   struct types.  Cf/Cd copy an f2c complex out by value; _pCf/_pCd
   reinterpret the pointer in place (assumes the f2c structs and the
   native complex types share the same {re, im} layout). */
43 static inline _Fcomplex Cf(complex *z) {_Fcomplex zz={z->r , z->i}; return zz;}
44 static inline _Dcomplex Cd(doublecomplex *z) {_Dcomplex zz={z->r , z->i};return zz;}
45 static inline _Fcomplex * _pCf(complex *z) {return (_Fcomplex*)z;}
46 static inline _Dcomplex * _pCd(doublecomplex *z) {return (_Dcomplex*)z;}
/* C99 path: Cf/Cd build a native _Complex value from an f2c complex
   struct; _pCf/_pCd treat the struct's address as a pointer to the
   _Complex type (C99 6.2.5 specifies complex types have the same
   representation as an array of two reals, so the layouts match). */
48 static inline _Complex float Cf(complex *z) {return z->r + z->i*_Complex_I;}
49 static inline _Complex double Cd(doublecomplex *z) {return z->r + z->i*_Complex_I;}
50 static inline _Complex float * _pCf(complex *z) {return (_Complex float*)z;}
51 static inline _Complex double * _pCd(doublecomplex *z) {return (_Complex double*)z;}
/* Lvalue access to an f2c complex through the native complex type:
   pCf(z) = w stores w into *z. */
53 #define pCf(z) (*_pCf(z))
54 #define pCd(z) (*_pCd(z))
56 typedef short int shortlogical;
57 typedef char logical1;
58 typedef char integer1;
63 /* Extern is for use with -E */
74 /*external read, write*/
83 /*internal read, write*/
113 /*rewind, backspace, endfile*/
125 ftnint *inex; /*parameters in standard's order*/
151 union Multitype { /* for multiple entry points */
162 typedef union Multitype Multitype;
164 struct Vardesc { /* for Namelist */
170 typedef struct Vardesc Vardesc;
177 typedef struct Namelist Namelist;
/* f2c arithmetic shims.  These are classic function-like macros: each
   argument may be evaluated more than once, so do not pass expressions
   with side effects (e.g. f2cmax(i++, j)). */
179 #define abs(x) ((x) >= 0 ? (x) : -(x))
180 #define dabs(x) (fabs(x))
181 #define f2cmin(a,b) ((a) <= (b) ? (a) : (b))
182 #define f2cmax(a,b) ((a) >= (b) ? (a) : (b))
183 #define dmin(a,b) (f2cmin(a,b))
184 #define dmax(a,b) (f2cmax(a,b))
/* Fortran BTEST/IBCLR/IBSET equivalents. */
185 #define bit_test(a,b) ((a) >> (b) & 1)
186 #define bit_clear(a,b) ((a) & ~((uinteger)1 << (b)))
187 #define bit_set(a,b) ((a) | ((uinteger)1 << (b)))
/* Fortran ABORT(): terminate via sig_die with a fixed message. */
#define abort_() { sig_die("Fortran abort routine called", 1); }
/* Single-precision complex intrinsics. */
#define c_abs(z) (cabsf(Cf(z)))
/* Fixed: use ccosf, not ccos.  The rest of the c_* family (c_sin, c_exp,
   c_log, c_sqrt) already uses the float-suffixed functions, and on the
   MSVC path ccos() would not even accept the _Fcomplex produced by Cf(). */
#define c_cos(R,Z) { pCf(R)=ccosf(Cf(Z)); }
/* Complex division for the MSVC (_Fcomplex/_Dcomplex) path.
   The originals were broken three ways: they assigned into the temporary
   struct returned by Cf()/Cd() (so the result was never stored), they
   divided componentwise (re/re, im/im), which is not complex division,
   and z_div referred to a nonexistent `df(b)`.  Store through the
   destination pointer and use the standard formula (a*conj(b))/|b|^2,
   reading all inputs into locals first so the destination may alias
   either source. */
#define c_div(c, a, b) do { \
	float c_div_ar = (a)->r, c_div_ai = (a)->i; \
	float c_div_br = (b)->r, c_div_bi = (b)->i; \
	float c_div_dn = c_div_br*c_div_br + c_div_bi*c_div_bi; \
	(c)->r = (c_div_ar*c_div_br + c_div_ai*c_div_bi) / c_div_dn; \
	(c)->i = (c_div_ai*c_div_br - c_div_ar*c_div_bi) / c_div_dn; \
} while (0)
#define z_div(c, a, b) do { \
	double z_div_ar = (a)->r, z_div_ai = (a)->i; \
	double z_div_br = (b)->r, z_div_bi = (b)->i; \
	double z_div_dn = z_div_br*z_div_br + z_div_bi*z_div_bi; \
	(c)->r = (z_div_ar*z_div_br + z_div_ai*z_div_bi) / z_div_dn; \
	(c)->i = (z_div_ai*z_div_br - z_div_ar*z_div_bi) / z_div_dn; \
} while (0)
/* C99 path: native _Complex division already implements true complex
   division, so just convert, divide, and store through the destination. */
196 #define c_div(c, a, b) {pCf(c) = Cf(a)/Cf(b);}
197 #define z_div(c, a, b) {pCd(c) = Cd(a)/Cd(b);}
199 #define c_exp(R, Z) {pCf(R) = cexpf(Cf(Z));}
200 #define c_log(R, Z) {pCf(R) = clogf(Cf(Z));}
201 #define c_sin(R, Z) {pCf(R) = csinf(Cf(Z));}
202 //#define c_sqrt(R, Z) {*(R) = csqrtf(Cf(Z));}
203 #define c_sqrt(R, Z) {pCf(R) = csqrtf(Cf(Z));}
204 #define d_abs(x) (fabs(*(x)))
205 #define d_acos(x) (acos(*(x)))
206 #define d_asin(x) (asin(*(x)))
207 #define d_atan(x) (atan(*(x)))
208 #define d_atn2(x, y) (atan2(*(x),*(y)))
209 #define d_cnjg(R, Z) { pCd(R) = conj(Cd(Z)); }
210 #define r_cnjg(R, Z) { pCf(R) = conjf(Cf(Z)); }
211 #define d_cos(x) (cos(*(x)))
212 #define d_cosh(x) (cosh(*(x)))
213 #define d_dim(__a, __b) ( *(__a) > *(__b) ? *(__a) - *(__b) : 0.0 )
214 #define d_exp(x) (exp(*(x)))
215 #define d_imag(z) (cimag(Cd(z)))
216 #define r_imag(z) (cimagf(Cf(z)))
217 #define d_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
218 #define r_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
219 #define d_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
220 #define r_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
221 #define d_log(x) (log(*(x)))
222 #define d_mod(x, y) (fmod(*(x), *(y)))
223 #define u_nint(__x) ((__x)>=0 ? floor((__x) + .5) : -floor(.5 - (__x)))
224 #define d_nint(x) u_nint(*(x))
225 #define u_sign(__a,__b) ((__b) >= 0 ? ((__a) >= 0 ? (__a) : -(__a)) : -((__a) >= 0 ? (__a) : -(__a)))
226 #define d_sign(a,b) u_sign(*(a),*(b))
227 #define r_sign(a,b) u_sign(*(a),*(b))
228 #define d_sin(x) (sin(*(x)))
229 #define d_sinh(x) (sinh(*(x)))
230 #define d_sqrt(x) (sqrt(*(x)))
231 #define d_tan(x) (tan(*(x)))
232 #define d_tanh(x) (tanh(*(x)))
233 #define i_abs(x) abs(*(x))
234 #define i_dnnt(x) ((integer)u_nint(*(x)))
235 #define i_len(s, n) (n)
236 #define i_nint(x) ((integer)u_nint(*(x)))
237 #define i_sign(a,b) ((integer)u_sign((integer)*(a),(integer)*(b)))
/* Fortran power intrinsics, mapped onto the *pow_ui helpers defined
   further down (integer exponent by repeated squaring). */
#define pow_dd(ap, bp) ( pow(*(ap), *(bp)))
#define pow_si(B,E) spow_ui(*(B),*(E))
#define pow_ri(B,E) spow_ui(*(B),*(E))
#define pow_di(B,E) dpow_ui(*(B),*(E))
#define pow_zi(p, a, b) {pCd(p) = zpow_ui(Cd(a), *(b));}
#define pow_ci(p, a, b) {pCf(p) = cpow_ui(Cf(a), *(b));}
/* Fixed: convert the exponent with Cd() as well -- `*(B)` dereferenced a
   doublecomplex struct, which cpow() cannot accept; every sibling macro
   here converts complex arguments through Cd()/Cf() before use. */
#define pow_zz(R,A,B) {pCd(R) = cpow(Cd(A),Cd(B));}
/* Fortran string-runtime shims. */
/* s_cat: concatenate *np sources rpp[] (lengths rnp[]) into lpp and
   blank-pad the destination out to total length llp (Fortran semantics). */
245 #define s_cat(lpp, rpp, rnp, np, llp) { ftnlen i, nc, ll; char *f__rp, *lp; ll = (llp); lp = (lpp); for(i=0; i < (int)*(np); ++i) { nc = ll; if((rnp)[i] < nc) nc = (rnp)[i]; ll -= nc; f__rp = (rpp)[i]; while(--nc >= 0) *lp++ = *(f__rp)++; } while(--ll >= 0) *lp++ = ' '; }
/* s_cmp: compare at most min(c,d) characters (no Fortran blank-padding rules). */
246 #define s_cmp(a,b,c,d) ((integer)strncmp((a),(b),f2cmin((c),(d))))
/* NOTE(review): unlike libf2c's s_copy, this stops at a NUL in B and does
   not blank-pad A to length C -- adequate for the short option strings the
   translated LAPACK code passes around, but not full Fortran assignment
   semantics; confirm before reusing elsewhere. */
247 #define s_copy(A,B,C,D) { int __i,__m; for (__i=0, __m=f2cmin((C),(D)); __i<__m && (B)[__i] != 0; ++__i) (A)[__i] = (B)[__i]; }
/* sig_die: message and kill flag are ignored; terminate with status 1. */
248 #define sig_die(s, kill) { exit(1); }
/* s_stop: Fortran STOP -> exit(0); the stop message is ignored. */
249 #define s_stop(s, n) {exit(0);}
250 static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n";
251 #define z_abs(z) (cabs(Cd(z)))
252 #define z_exp(R, Z) {pCd(R) = cexp(Cd(Z));}
253 #define z_sqrt(R, Z) {pCd(R) = csqrt(Cd(Z));}
/* Loop-control and numeric helper shims used by the translated Fortran.
   Fixed two macro-hygiene defects: myexit_/mycycle baked a trailing `;`
   into the expansion, so `if (x) myexit_(); else ...` expanded to
   `break;; else`, a syntax error; and the value macros were wrapped in
   braces `{...}`, making them unusable as expressions.  Drop the
   semicolons and use parentheses instead -- every previously legal use
   still expands to the same code. */
#define myexit_() break
#define mycycle() continue
#define myceiling(w) (ceil(w))
#define myhuge(w) (HUGE_VAL)
//#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);}
#define mymaxloc(w,s,e,n) (dmaxloc_(w,*(s),*(e),n))
261 /* procedure parameter types for -A and -C++ */
263 #define F2C_proc_par_types 1
265 typedef logical (*L_fp)(...);
267 typedef logical (*L_fp)();
270 static float spow_ui(float x, integer n) {
271 float pow=1.0; unsigned long int u;
273 if(n < 0) n = -n, x = 1/x;
282 static double dpow_ui(double x, integer n) {
283 double pow=1.0; unsigned long int u;
285 if(n < 0) n = -n, x = 1/x;
295 static _Fcomplex cpow_ui(complex x, integer n) {
296 complex pow={1.0,0.0}; unsigned long int u;
298 if(n < 0) n = -n, x.r = 1/x.r, x.i=1/x.i;
300 if(u & 01) pow.r *= x.r, pow.i *= x.i;
301 if(u >>= 1) x.r *= x.r, x.i *= x.i;
305 _Fcomplex p={pow.r, pow.i};
309 static _Complex float cpow_ui(_Complex float x, integer n) {
310 _Complex float pow=1.0; unsigned long int u;
312 if(n < 0) n = -n, x = 1/x;
323 static _Dcomplex zpow_ui(_Dcomplex x, integer n) {
324 _Dcomplex pow={1.0,0.0}; unsigned long int u;
326 if(n < 0) n = -n, x._Val[0] = 1/x._Val[0], x._Val[1] =1/x._Val[1];
328 if(u & 01) pow._Val[0] *= x._Val[0], pow._Val[1] *= x._Val[1];
329 if(u >>= 1) x._Val[0] *= x._Val[0], x._Val[1] *= x._Val[1];
333 _Dcomplex p = {pow._Val[0], pow._Val[1]};
337 static _Complex double zpow_ui(_Complex double x, integer n) {
338 _Complex double pow=1.0; unsigned long int u;
340 if(n < 0) n = -n, x = 1/x;
350 static integer pow_ii(integer x, integer n) {
351 integer pow; unsigned long int u;
353 if (n == 0 || x == 1) pow = 1;
354 else if (x != -1) pow = x == 0 ? 1/x : 0;
357 if ((n > 0) || !(n == 0 || x == 1 || x != -1)) {
367 static integer dmaxloc_(double *w, integer s, integer e, integer *n)
369 double m; integer i, mi;
370 for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
371 if (w[i-1]>m) mi=i ,m=w[i-1];
374 static integer smaxloc_(float *w, integer s, integer e, integer *n)
376 float m; integer i, mi;
377 for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
378 if (w[i-1]>m) mi=i ,m=w[i-1];
381 static inline void cdotc_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
382 integer n = *n_, incx = *incx_, incy = *incy_, i;
384 _Fcomplex zdotc = {0.0, 0.0};
385 if (incx == 1 && incy == 1) {
386 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
387 zdotc._Val[0] += conjf(Cf(&x[i]))._Val[0] * Cf(&y[i])._Val[0];
388 zdotc._Val[1] += conjf(Cf(&x[i]))._Val[1] * Cf(&y[i])._Val[1];
391 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
392 zdotc._Val[0] += conjf(Cf(&x[i*incx]))._Val[0] * Cf(&y[i*incy])._Val[0];
393 zdotc._Val[1] += conjf(Cf(&x[i*incx]))._Val[1] * Cf(&y[i*incy])._Val[1];
399 _Complex float zdotc = 0.0;
400 if (incx == 1 && incy == 1) {
401 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
402 zdotc += conjf(Cf(&x[i])) * Cf(&y[i]);
405 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
406 zdotc += conjf(Cf(&x[i*incx])) * Cf(&y[i*incy]);
412 static inline void zdotc_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
413 integer n = *n_, incx = *incx_, incy = *incy_, i;
415 _Dcomplex zdotc = {0.0, 0.0};
416 if (incx == 1 && incy == 1) {
417 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
418 zdotc._Val[0] += conj(Cd(&x[i]))._Val[0] * Cd(&y[i])._Val[0];
419 zdotc._Val[1] += conj(Cd(&x[i]))._Val[1] * Cd(&y[i])._Val[1];
422 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
423 zdotc._Val[0] += conj(Cd(&x[i*incx]))._Val[0] * Cd(&y[i*incy])._Val[0];
424 zdotc._Val[1] += conj(Cd(&x[i*incx]))._Val[1] * Cd(&y[i*incy])._Val[1];
430 _Complex double zdotc = 0.0;
431 if (incx == 1 && incy == 1) {
432 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
433 zdotc += conj(Cd(&x[i])) * Cd(&y[i]);
436 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
437 zdotc += conj(Cd(&x[i*incx])) * Cd(&y[i*incy]);
443 static inline void cdotu_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
444 integer n = *n_, incx = *incx_, incy = *incy_, i;
446 _Fcomplex zdotc = {0.0, 0.0};
447 if (incx == 1 && incy == 1) {
448 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
449 zdotc._Val[0] += Cf(&x[i])._Val[0] * Cf(&y[i])._Val[0];
450 zdotc._Val[1] += Cf(&x[i])._Val[1] * Cf(&y[i])._Val[1];
453 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
454 zdotc._Val[0] += Cf(&x[i*incx])._Val[0] * Cf(&y[i*incy])._Val[0];
455 zdotc._Val[1] += Cf(&x[i*incx])._Val[1] * Cf(&y[i*incy])._Val[1];
461 _Complex float zdotc = 0.0;
462 if (incx == 1 && incy == 1) {
463 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
464 zdotc += Cf(&x[i]) * Cf(&y[i]);
467 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
468 zdotc += Cf(&x[i*incx]) * Cf(&y[i*incy]);
474 static inline void zdotu_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
475 integer n = *n_, incx = *incx_, incy = *incy_, i;
477 _Dcomplex zdotc = {0.0, 0.0};
478 if (incx == 1 && incy == 1) {
479 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
480 zdotc._Val[0] += Cd(&x[i])._Val[0] * Cd(&y[i])._Val[0];
481 zdotc._Val[1] += Cd(&x[i])._Val[1] * Cd(&y[i])._Val[1];
484 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
485 zdotc._Val[0] += Cd(&x[i*incx])._Val[0] * Cd(&y[i*incy])._Val[0];
486 zdotc._Val[1] += Cd(&x[i*incx])._Val[1] * Cd(&y[i*incy])._Val[1];
492 _Complex double zdotc = 0.0;
493 if (incx == 1 && incy == 1) {
494 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
495 zdotc += Cd(&x[i]) * Cd(&y[i]);
498 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
499 zdotc += Cd(&x[i*incx]) * Cd(&y[i*incy]);
505 /* -- translated by f2c (version 20000121).
506 You must link the resulting object file with the libraries:
507 -lf2c -lm (in that order)
513 /* Table of constant values */
515 static real c_b17 = 0.f;
516 static real c_b18 = 1.f;
517 static integer c__1 = 1;
518 static integer c__0 = 0;
519 static integer c__2 = 2;
521 /* > \brief \b SGESVJ */
523 /* =========== DOCUMENTATION =========== */
525 /* Online html documentation available at */
526 /* http://www.netlib.org/lapack/explore-html/ */
529 /* > Download SGESVJ + dependencies */
530 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/sgesvj.
533 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/sgesvj.
536 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/sgesvj.
544 /* SUBROUTINE SGESVJ( JOBA, JOBU, JOBV, M, N, A, LDA, SVA, MV, V, */
545 /* LDV, WORK, LWORK, INFO ) */
547 /* INTEGER INFO, LDA, LDV, LWORK, M, MV, N */
548 /* CHARACTER*1 JOBA, JOBU, JOBV */
549 /* REAL A( LDA, * ), SVA( N ), V( LDV, * ), */
550 /* $ WORK( LWORK ) */
553 /* > \par Purpose: */
558 /* > SGESVJ computes the singular value decomposition (SVD) of a real */
559 /* > M-by-N matrix A, where M >= N. The SVD of A is written as */
560 /* > [++] [xx] [x0] [xx] */
561 /* > A = U * SIGMA * V^t, [++] = [xx] * [ox] * [xx] */
563 /* > where SIGMA is an N-by-N diagonal matrix, U is an M-by-N orthonormal */
564 /* > matrix, and V is an N-by-N orthogonal matrix. The diagonal elements */
565 /* > of SIGMA are the singular values of A. The columns of U and V are the */
566 /* > left and the right singular vectors of A, respectively. */
567 /* > SGESVJ can sometimes compute tiny singular values and their singular vectors much */
568 /* > more accurately than other SVD routines, see below under Further Details. */
574 /* > \param[in] JOBA */
576 /* > JOBA is CHARACTER*1 */
577 /* > Specifies the structure of A. */
578 /* > = 'L': The input matrix A is lower triangular; */
579 /* > = 'U': The input matrix A is upper triangular; */
580 /* > = 'G': The input matrix A is general M-by-N matrix, M >= N. */
583 /* > \param[in] JOBU */
585 /* > JOBU is CHARACTER*1 */
586 /* > Specifies whether to compute the left singular vectors */
587 /* > (columns of U): */
588 /* > = 'U': The left singular vectors corresponding to the nonzero */
589 /* > singular values are computed and returned in the leading */
590 /* > columns of A. See more details in the description of A. */
591 /* > The default numerical orthogonality threshold is set to */
592 /* > approximately TOL=CTOL*EPS, CTOL=SQRT(M), EPS=SLAMCH('E'). */
593 /* > = 'C': Analogous to JOBU='U', except that user can control the */
594 /* > level of numerical orthogonality of the computed left */
595 /* > singular vectors. TOL can be set to TOL = CTOL*EPS, where */
596 /* > CTOL is given on input in the array WORK. */
597 /* > No CTOL smaller than ONE is allowed. CTOL greater */
598 /* > than 1 / EPS is meaningless. The option 'C' */
599 /* > can be used if M*EPS is satisfactory orthogonality */
600 /* > of the computed left singular vectors, so CTOL=M could */
601 /* > save few sweeps of Jacobi rotations. */
602 /* > See the descriptions of A and WORK(1). */
603 /* > = 'N': The matrix U is not computed. However, see the */
604 /* > description of A. */
607 /* > \param[in] JOBV */
609 /* > JOBV is CHARACTER*1 */
610 /* > Specifies whether to compute the right singular vectors, that */
611 /* > is, the matrix V: */
612 /* > = 'V': the matrix V is computed and returned in the array V */
613 /* > = 'A': the Jacobi rotations are applied to the MV-by-N */
614 /* > array V. In other words, the right singular vector */
615 /* > matrix V is not computed explicitly; instead it is */
616 /* > applied to an MV-by-N matrix initially stored in the */
617 /* > first MV rows of V. */
618 /* > = 'N': the matrix V is not computed and the array V is not */
625 /* > The number of rows of the input matrix A. 1/SLAMCH('E') > M >= 0. */
631 /* > The number of columns of the input matrix A. */
635 /* > \param[in,out] A */
637 /* > A is REAL array, dimension (LDA,N) */
638 /* > On entry, the M-by-N matrix A. */
640 /* > If JOBU = 'U' .OR. JOBU = 'C': */
642 /* > RANKA orthonormal columns of U are returned in the */
643 /* > leading RANKA columns of the array A. Here RANKA <= N */
644 /* > is the number of computed singular values of A that are */
645 /* > above the underflow threshold SLAMCH('S'). The singular */
646 /* > vectors corresponding to underflowed or zero singular */
647 /* > values are not computed. The value of RANKA is returned */
648 /* > in the array WORK as RANKA=NINT(WORK(2)). Also see the */
649 /* > descriptions of SVA and WORK. The computed columns of U */
650 /* > are mutually numerically orthogonal up to approximately */
651 /* > TOL=SQRT(M)*EPS (default); or TOL=CTOL*EPS (JOBU = 'C'), */
652 /* > see the description of JOBU. */
654 /* > the procedure SGESVJ did not converge in the given number */
655 /* > of iterations (sweeps). In that case, the computed */
656 /* > columns of U may not be orthogonal up to TOL. The output */
657 /* > U (stored in A), SIGMA (given by the computed singular */
658 /* > values in SVA(1:N)) and V is still a decomposition of the */
659 /* > input matrix A in the sense that the residual */
660 /* > ||A-SCALE*U*SIGMA*V^T||_2 / ||A||_2 is small. */
661 /* > If JOBU = 'N': */
663 /* > Note that the left singular vectors are 'for free' in the */
664 /* > one-sided Jacobi SVD algorithm. However, if only the */
665 /* > singular values are needed, the level of numerical */
666 /* > orthogonality of U is not an issue and iterations are */
667 /* > stopped when the columns of the iterated matrix are */
668 /* > numerically orthogonal up to approximately M*EPS. Thus, */
669 /* > on exit, A contains the columns of U scaled with the */
670 /* > corresponding singular values. */
672 /* > the procedure SGESVJ did not converge in the given number */
673 /* > of iterations (sweeps). */
676 /* > \param[in] LDA */
678 /* > LDA is INTEGER */
679 /* > The leading dimension of the array A. LDA >= f2cmax(1,M). */
682 /* > \param[out] SVA */
684 /* > SVA is REAL array, dimension (N) */
686 /* > If INFO = 0 : */
687 /* > depending on the value SCALE = WORK(1), we have: */
688 /* > If SCALE = ONE: */
689 /* > SVA(1:N) contains the computed singular values of A. */
690 /* > During the computation SVA contains the Euclidean column */
691 /* > norms of the iterated matrices in the array A. */
692 /* > If SCALE .NE. ONE: */
693 /* > The singular values of A are SCALE*SVA(1:N), and this */
694 /* > factored representation is due to the fact that some of the */
695 /* > singular values of A might underflow or overflow. */
697 /* > If INFO > 0 : */
698 /* > the procedure SGESVJ did not converge in the given number of */
699 /* > iterations (sweeps) and SCALE*SVA(1:N) may not be accurate. */
702 /* > \param[in] MV */
704 /* > MV is INTEGER */
705 /* > If JOBV = 'A', then the product of Jacobi rotations in SGESVJ */
706 /* > is applied to the first MV rows of V. See the description of JOBV. */
709 /* > \param[in,out] V */
711 /* > V is REAL array, dimension (LDV,N) */
712 /* > If JOBV = 'V', then V contains on exit the N-by-N matrix of */
713 /* > the right singular vectors; */
714 /* > If JOBV = 'A', then V contains the product of the computed right */
715 /* > singular vector matrix and the initial matrix in */
717 /* > If JOBV = 'N', then V is not referenced. */
720 /* > \param[in] LDV */
722 /* > LDV is INTEGER */
723 /* > The leading dimension of the array V, LDV >= 1. */
724 /* > If JOBV = 'V', then LDV >= f2cmax(1,N). */
725 /* > If JOBV = 'A', then LDV >= f2cmax(1,MV) . */
728 /* > \param[in,out] WORK */
730 /* > WORK is REAL array, dimension (LWORK) */
732 /* > If JOBU = 'C' : */
733 /* > WORK(1) = CTOL, where CTOL defines the threshold for convergence. */
734 /* > The process stops if all columns of A are mutually */
735 /* > orthogonal up to CTOL*EPS, EPS=SLAMCH('E'). */
736 /* > It is required that CTOL >= ONE, i.e. it is not */
737 /* > allowed to force the routine to obtain orthogonality */
738 /* > below EPSILON. */
740 /* > WORK(1) = SCALE is the scaling factor such that SCALE*SVA(1:N) */
741 /* > are the computed singular values of A. */
742 /* > (See description of SVA().) */
743 /* > WORK(2) = NINT(WORK(2)) is the number of the computed nonzero */
744 /* > singular values. */
745 /* > WORK(3) = NINT(WORK(3)) is the number of the computed singular */
746 /* > values that are larger than the underflow threshold. */
747 /* > WORK(4) = NINT(WORK(4)) is the number of sweeps of Jacobi */
748 /* > rotations needed for numerical convergence. */
749 /* > WORK(5) = max_{i.NE.j} |COS(A(:,i),A(:,j))| in the last sweep. */
750 /* > This is useful information in cases when SGESVJ did */
751 /* > not converge, as it can be used to estimate whether */
752 /* > the output is still useful and for post festum analysis. */
753 /* > WORK(6) = the largest absolute value over all sines of the */
754 /* > Jacobi rotation angles in the last sweep. It can be */
755 /* > useful for a post festum analysis. */
758 /* > \param[in] LWORK */
760 /* > LWORK is INTEGER */
761 /* > length of WORK, WORK >= MAX(6,M+N) */
764 /* > \param[out] INFO */
766 /* > INFO is INTEGER */
767 /* > = 0: successful exit. */
768 /* > < 0: if INFO = -i, then the i-th argument had an illegal value */
769 /* > > 0: SGESVJ did not converge in the maximal allowed number (30) */
770 /* > of sweeps. The output may still be useful. See the */
771 /* > description of WORK. */
777 /* > \author Univ. of Tennessee */
778 /* > \author Univ. of California Berkeley */
779 /* > \author Univ. of Colorado Denver */
780 /* > \author NAG Ltd. */
782 /* > \date June 2017 */
784 /* > \ingroup realGEcomputational */
786 /* > \par Further Details: */
787 /* ===================== */
789 /* > The orthogonal N-by-N matrix V is obtained as a product of Jacobi plane */
790 /* > rotations. The rotations are implemented as fast scaled rotations of */
791 /* > Anda and Park [1]. In the case of underflow of the Jacobi angle, a */
792 /* > modified Jacobi transformation of Drmac [4] is used. Pivot strategy uses */
793 /* > column interchanges of de Rijk [2]. The relative accuracy of the computed */
794 /* > singular values and the accuracy of the computed singular vectors (in */
795 /* > angle metric) is as guaranteed by the theory of Demmel and Veselic [3]. */
796 /* > The condition number that determines the accuracy in the full rank case */
797 /* > is essentially min_{D=diag} kappa(A*D), where kappa(.) is the */
798 /* > spectral condition number. The best performance of this Jacobi SVD */
799 /* > procedure is achieved if used in an accelerated version of Drmac and */
800 /* > Veselic [5,6], and it is the kernel routine in the SIGMA library [7]. */
801 /* > Some tuning parameters (marked with [TP]) are available for the */
802 /* > implementer. \n */
803 /* > The computational range for the nonzero singular values is the machine */
804 /* > number interval ( UNDERFLOW , OVERFLOW ). In extreme cases, even */
805 /* > denormalized singular values can be computed with the corresponding */
806 /* > gradual loss of accurate digits. */
808 /* > \par Contributors: */
809 /* ================== */
811 /* > Zlatko Drmac (Zagreb, Croatia) and Kresimir Veselic (Hagen, Germany) */
813 /* > \par References: */
814 /* ================ */
816 /* > [1] A. A. Anda and H. Park: Fast plane rotations with dynamic scaling. \n */
817 /* > SIAM J. matrix Anal. Appl., Vol. 15 (1994), pp. 162-174. \n\n */
818 /* > [2] P. P. M. De Rijk: A one-sided Jacobi algorithm for computing the */
819 /* > singular value decomposition on a vector computer. \n */
820 /* > SIAM J. Sci. Stat. Comp., Vol. 10 (1989), pp. 359-371. \n\n */
821 /* > [3] J. Demmel and K. Veselic: Jacobi method is more accurate than QR. \n */
822 /* > [4] Z. Drmac: Implementation of Jacobi rotations for accurate singular */
823 /* > value computation in floating point arithmetic. \n */
824 /* > SIAM J. Sci. Comp., Vol. 18 (1997), pp. 1200-1222. \n\n */
825 /* > [5] Z. Drmac and K. Veselic: New fast and accurate Jacobi SVD algorithm I. \n */
826 /* > SIAM J. Matrix Anal. Appl. Vol. 35, No. 2 (2008), pp. 1322-1342. \n */
827 /* > LAPACK Working note 169. \n\n */
828 /* > [6] Z. Drmac and K. Veselic: New fast and accurate Jacobi SVD algorithm II. \n */
829 /* > SIAM J. Matrix Anal. Appl. Vol. 35, No. 2 (2008), pp. 1343-1362. \n */
830 /* > LAPACK Working note 170. \n\n */
831 /* > [7] Z. Drmac: SIGMA - mathematical software library for accurate SVD, PSV, */
832 /* > QSVD, (H,K)-SVD computations.\n */
833 /* > Department of Mathematics, University of Zagreb, 2008. */
835 /* > \par Bugs, Examples and Comments: */
836 /* ================================= */
838 /* > Please report all bugs and send interesting test examples and comments to */
839 /* > drmac@math.hr. Thank you. */
841 /* ===================================================================== */
842 /* Subroutine */ int sgesvj_(char *joba, char *jobu, char *jobv, integer *m,
843 integer *n, real *a, integer *lda, real *sva, integer *mv, real *v,
844 integer *ldv, real *work, integer *lwork, integer *info)
846 /* System generated locals */
847 integer a_dim1, a_offset, v_dim1, v_offset, i__1, i__2, i__3, i__4, i__5;
850 /* Local variables */
851 real aapp, aapq, aaqq, ctol;
854 extern real sdot_(integer *, real *, integer *, real *, integer *);
857 extern real snrm2_(integer *, real *, integer *);
859 real t, large, apoaq, aqoap;
860 extern logical lsame_(char *, char *);
862 extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *);
865 real fastr[5], epsln;
866 logical applv, rsvec, uctol, lower, upper;
867 extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *,
871 extern /* Subroutine */ int sswap_(integer *, real *, integer *, real *,
874 extern /* Subroutine */ int saxpy_(integer *, real *, real *, integer *,
875 real *, integer *), srotm_(integer *, real *, integer *, real *,
878 extern /* Subroutine */ int sgsvj0_(char *, integer *, integer *, real *,
879 integer *, real *, real *, integer *, real *, integer *, real *,
880 real *, real *, integer *, real *, integer *, integer *),
881 sgsvj1_(char *, integer *, integer *, integer *, real *, integer *
882 , real *, real *, integer *, real *, integer *, real *, real *,
883 real *, integer *, real *, integer *, integer *);
886 extern real slamch_(char *);
887 extern /* Subroutine */ int xerbla_(char *, integer *, ftnlen);
888 integer ijblsk, swband;
889 extern /* Subroutine */ int slascl_(char *, integer *, integer *, real *,
890 real *, integer *, integer *, real *, integer *, integer *);
891 extern integer isamax_(integer *, real *, integer *);
894 extern /* Subroutine */ int slaset_(char *, integer *, integer *, real *,
895 real *, real *, integer *);
897 extern /* Subroutine */ int slassq_(integer *, real *, integer *, real *,
900 integer ir1, emptsw, notrot, iswrot, jbc;
902 integer kbl, lkahead, igl, ibr, jgl, nbl;
908 real rootbig, rooteps;
913 /* -- LAPACK computational routine (version 3.7.1) -- */
914 /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
915 /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
919 /* ===================================================================== */
927 /* Test the input arguments */
929 /* Parameter adjustments */
932 a_offset = 1 + a_dim1 * 1;
935 v_offset = 1 + v_dim1 * 1;
940 lsvec = lsame_(jobu, "U");
941 uctol = lsame_(jobu, "C");
942 rsvec = lsame_(jobv, "V");
943 applv = lsame_(jobv, "A");
944 upper = lsame_(joba, "U");
945 lower = lsame_(joba, "L");
947 if (! (upper || lower || lsame_(joba, "G"))) {
949 } else if (! (lsvec || uctol || lsame_(jobu, "N")))
952 } else if (! (rsvec || applv || lsame_(jobv, "N")))
957 } else if (*n < 0 || *n > *m) {
959 } else if (*lda < *m) {
961 } else if (*mv < 0) {
963 } else if (rsvec && *ldv < *n || applv && *ldv < *mv) {
965 } else if (uctol && work[1] <= 1.f) {
967 } else /* if(complicated condition) */ {
970 if (*lwork < f2cmax(i__1,6)) {
980 xerbla_("SGESVJ", &i__1, (ftnlen)6);
984 /* #:) Quick return for void matrix */
986 if (*m == 0 || *n == 0) {
990 /* Set numerical parameters */
991 /* The stopping criterion for Jacobi rotations is */
993 /* max_{i<>j}|A(:,i)^T * A(:,j)|/(||A(:,i)||*||A(:,j)||) < CTOL*EPS */
995 /* where EPS is the round-off and CTOL is defined as follows: */
998 /* ... user controlled */
1002 if (lsvec || rsvec || applv) {
1003 ctol = sqrt((real) (*m));
1008 /* ... and the machine dependent parameters are */
1009 /* [!] (Make sure that SLAMCH() works properly on the target machine.) */
1011 epsln = slamch_("Epsilon");
1012 rooteps = sqrt(epsln);
1013 sfmin = slamch_("SafeMinimum");
1014 rootsfmin = sqrt(sfmin);
1015 small = sfmin / epsln;
1016 big = slamch_("Overflow");
1017 /* BIG = ONE / SFMIN */
1018 rootbig = 1.f / rootsfmin;
1019 large = big / sqrt((real) (*m * *n));
1020 bigtheta = 1.f / rooteps;
1023 roottol = sqrt(tol);
1025 if ((real) (*m) * epsln >= 1.f) {
1028 xerbla_("SGESVJ", &i__1, (ftnlen)6);
1032 /* Initialize the right singular vector matrix. */
1036 slaset_("A", &mvl, n, &c_b17, &c_b18, &v[v_offset], ldv);
1040 rsvec = rsvec || applv;
1042 /* Initialize SVA( 1:N ) = ( ||A e_i||_2, i = 1:N ) */
1043 /* (!) If necessary, scale A to protect the largest singular value */
1044 /* from overflow. It is possible that saving the largest singular */
1045 /* value destroys the information about the small ones. */
1046 /* This initial scaling is almost minimal in the sense that the */
1047 /* goal is to make sure that no column norm overflows, and that */
1048 /* SQRT(N)*max_i SVA(i) does not overflow. If INFinite entries */
1049 /* in A are detected, the procedure returns with INFO=-6. */
1051 skl = 1.f / sqrt((real) (*m) * (real) (*n));
1056 /* the input matrix is M-by-N lower triangular (trapezoidal) */
1058 for (p = 1; p <= i__1; ++p) {
1062 slassq_(&i__2, &a[p + p * a_dim1], &c__1, &aapp, &aaqq);
1066 xerbla_("SGESVJ", &i__2, (ftnlen)6);
1070 if (aapp < big / aaqq && noscale) {
1071 sva[p] = aapp * aaqq;
1074 sva[p] = aapp * (aaqq * skl);
1078 for (q = 1; q <= i__2; ++q) {
1087 /* the input matrix is M-by-N upper triangular (trapezoidal) */
1089 for (p = 1; p <= i__1; ++p) {
1092 slassq_(&p, &a[p * a_dim1 + 1], &c__1, &aapp, &aaqq);
1096 xerbla_("SGESVJ", &i__2, (ftnlen)6);
1100 if (aapp < big / aaqq && noscale) {
1101 sva[p] = aapp * aaqq;
1104 sva[p] = aapp * (aaqq * skl);
1108 for (q = 1; q <= i__2; ++q) {
1117 /* the input matrix is M-by-N general dense */
1119 for (p = 1; p <= i__1; ++p) {
1122 slassq_(m, &a[p * a_dim1 + 1], &c__1, &aapp, &aaqq);
1126 xerbla_("SGESVJ", &i__2, (ftnlen)6);
1130 if (aapp < big / aaqq && noscale) {
1131 sva[p] = aapp * aaqq;
1134 sva[p] = aapp * (aaqq * skl);
1138 for (q = 1; q <= i__2; ++q) {
1152 /* Move the smaller part of the spectrum from the underflow threshold */
1153 /* (!) Start by determining the position of the nonzero entries of the */
1154 /* array SVA() relative to ( SFMIN, BIG ). */
1159 for (p = 1; p <= i__1; ++p) {
1160 if (sva[p] != 0.f) {
1162 r__1 = aaqq, r__2 = sva[p];
1163 aaqq = f2cmin(r__1,r__2);
1166 r__1 = aapp, r__2 = sva[p];
1167 aapp = f2cmax(r__1,r__2);
1171 /* #:) Quick return for zero matrix */
1175 slaset_("G", m, n, &c_b17, &c_b18, &a[a_offset], lda);
1186 /* #:) Quick return for one-column matrix */
1190 slascl_("G", &c__0, &c__0, &sva[1], &skl, m, &c__1, &a[a_dim1 + 1]
1193 work[1] = 1.f / skl;
1194 if (sva[1] >= sfmin) {
1206 /* Protect small singular values from underflow, and try to */
1207 /* avoid underflows/overflows in computing Jacobi rotations. */
1209 sn = sqrt(sfmin / epsln);
1210 temp1 = sqrt(big / (real) (*n));
1211 if (aapp <= sn || aaqq >= temp1 || sn <= aaqq && aapp <= temp1) {
1213 r__1 = big, r__2 = temp1 / aapp;
1214 temp1 = f2cmin(r__1,r__2);
1215 /* AAQQ = AAQQ*TEMP1 */
1216 /* AAPP = AAPP*TEMP1 */
1217 } else if (aaqq <= sn && aapp <= temp1) {
1219 r__1 = sn / aaqq, r__2 = big / (aapp * sqrt((real) (*n)));
1220 temp1 = f2cmin(r__1,r__2);
1221 /* AAQQ = AAQQ*TEMP1 */
1222 /* AAPP = AAPP*TEMP1 */
1223 } else if (aaqq >= sn && aapp >= temp1) {
1225 r__1 = sn / aaqq, r__2 = temp1 / aapp;
1226 temp1 = f2cmax(r__1,r__2);
1227 /* AAQQ = AAQQ*TEMP1 */
1228 /* AAPP = AAPP*TEMP1 */
1229 } else if (aaqq <= sn && aapp >= temp1) {
1231 r__1 = sn / aaqq, r__2 = big / (sqrt((real) (*n)) * aapp);
1232 temp1 = f2cmin(r__1,r__2);
1233 /* AAQQ = AAQQ*TEMP1 */
1234 /* AAPP = AAPP*TEMP1 */
1239 /* Scale, if necessary */
1242 slascl_("G", &c__0, &c__0, &c_b18, &temp1, n, &c__1, &sva[1], n, &
1247 slascl_(joba, &c__0, &c__0, &c_b18, &skl, m, n, &a[a_offset], lda, &
1252 /* Row-cyclic Jacobi SVD algorithm with column pivoting */
1254 emptsw = *n * (*n - 1) / 2;
1258 /* A is represented in factored form A = A * diag(WORK), where diag(WORK) */
1259 /* is initialized to identity. WORK is updated during fast scaled */
1263 for (q = 1; q <= i__1; ++q) {
1270 /* [TP] SWBAND is a tuning parameter [TP]. It is meaningful and effective */
1271 /* if SGESVJ is used as a computational routine in the preconditioned */
1272 /* Jacobi SVD algorithm SGESVJ. For sweeps i=1:SWBAND the procedure */
1273 /* works on pivots inside a band-like region around the diagonal. */
1274 /* The boundaries are determined dynamically, based on the number of */
1275 /* pivots above a threshold. */
1278 /* [TP] KBL is a tuning parameter that defines the tile size in the */
1279 /* tiling of the p-q loops of pivot pairs. In general, an optimal */
1280 /* value of KBL depends on the matrix dimensions and on the */
1281 /* parameters of the computer's memory. */
1284 if (nbl * kbl != *n) {
1288 /* Computing 2nd power */
1290 blskip = i__1 * i__1;
1291 /* [TP] BLKSKIP is a tuning parameter that depends on SWBAND and KBL. */
1293 rowskip = f2cmin(5,kbl);
1294 /* [TP] ROWSKIP is a tuning parameter. */
1297 /* [TP] LKAHEAD is a tuning parameter. */
1299 /* Quasi block transformations, using the lower (upper) triangular */
1300 /* structure of the input matrix. The quasi-block-cycling usually */
1301 /* invokes cubic convergence. Big part of this cycle is done inside */
1302 /* canonical subspaces of dimensions less than M. */
1305 i__1 = 64, i__2 = kbl << 2;
1306 if ((lower || upper) && *n > f2cmax(i__1,i__2)) {
1307 /* [TP] The number of partition levels and the actual partition are */
1308 /* tuning parameters. */
1320 /* This works very well on lower triangular matrices, in particular */
1321 /* in the framework of the preconditioned Jacobi SVD (xGEJSV). */
1322 /* The idea is simple: */
1323 /* [+ 0 0 0] Note that Jacobi transformations of [0 0] */
1324 /* [+ + 0 0] [0 0] */
1325 /* [+ + x 0] actually work on [x 0] [x 0] */
1326 /* [+ + x x] [x x]. [x x] */
1331 sgsvj0_(jobv, &i__1, &i__2, &a[n34 + 1 + (n34 + 1) * a_dim1], lda,
1332 &work[n34 + 1], &sva[n34 + 1], &mvl, &v[n34 * q + 1 + (
1333 n34 + 1) * v_dim1], ldv, &epsln, &sfmin, &tol, &c__2, &
1334 work[*n + 1], &i__3, &ierr);
1339 sgsvj0_(jobv, &i__1, &i__2, &a[n2 + 1 + (n2 + 1) * a_dim1], lda, &
1340 work[n2 + 1], &sva[n2 + 1], &mvl, &v[n2 * q + 1 + (n2 + 1)
1341 * v_dim1], ldv, &epsln, &sfmin, &tol, &c__2, &work[*n +
1347 sgsvj1_(jobv, &i__1, &i__2, &n4, &a[n2 + 1 + (n2 + 1) * a_dim1],
1348 lda, &work[n2 + 1], &sva[n2 + 1], &mvl, &v[n2 * q + 1 + (
1349 n2 + 1) * v_dim1], ldv, &epsln, &sfmin, &tol, &c__1, &
1350 work[*n + 1], &i__3, &ierr);
1355 sgsvj0_(jobv, &i__1, &i__2, &a[n4 + 1 + (n4 + 1) * a_dim1], lda, &
1356 work[n4 + 1], &sva[n4 + 1], &mvl, &v[n4 * q + 1 + (n4 + 1)
1357 * v_dim1], ldv, &epsln, &sfmin, &tol, &c__1, &work[*n +
1361 sgsvj0_(jobv, m, &n4, &a[a_offset], lda, &work[1], &sva[1], &mvl,
1362 &v[v_offset], ldv, &epsln, &sfmin, &tol, &c__1, &work[*n
1363 + 1], &i__1, &ierr);
1366 sgsvj1_(jobv, m, &n2, &n4, &a[a_offset], lda, &work[1], &sva[1], &
1367 mvl, &v[v_offset], ldv, &epsln, &sfmin, &tol, &c__1, &
1368 work[*n + 1], &i__1, &ierr);
1375 sgsvj0_(jobv, &n4, &n4, &a[a_offset], lda, &work[1], &sva[1], &
1376 mvl, &v[v_offset], ldv, &epsln, &sfmin, &tol, &c__2, &
1377 work[*n + 1], &i__1, &ierr);
1380 sgsvj0_(jobv, &n2, &n4, &a[(n4 + 1) * a_dim1 + 1], lda, &work[n4
1381 + 1], &sva[n4 + 1], &mvl, &v[n4 * q + 1 + (n4 + 1) *
1382 v_dim1], ldv, &epsln, &sfmin, &tol, &c__1, &work[*n + 1],
1386 sgsvj1_(jobv, &n2, &n2, &n4, &a[a_offset], lda, &work[1], &sva[1],
1387 &mvl, &v[v_offset], ldv, &epsln, &sfmin, &tol, &c__1, &
1388 work[*n + 1], &i__1, &ierr);
1392 sgsvj0_(jobv, &i__1, &n4, &a[(n2 + 1) * a_dim1 + 1], lda, &work[
1393 n2 + 1], &sva[n2 + 1], &mvl, &v[n2 * q + 1 + (n2 + 1) *
1394 v_dim1], ldv, &epsln, &sfmin, &tol, &c__1, &work[*n + 1],
1401 for (i__ = 1; i__ <= 30; ++i__) {
1411 /* Each sweep is unrolled using KBL-by-KBL tiles over the pivot pairs */
1412 /* 1 <= p < q <= N. This is the first step toward a blocked implementation */
1413 /* of the rotations. New implementation, based on block transformations, */
1414 /* is under development. */
1417 for (ibr = 1; ibr <= i__1; ++ibr) {
1419 igl = (ibr - 1) * kbl + 1;
1422 i__3 = lkahead, i__4 = nbl - ibr;
1423 i__2 = f2cmin(i__3,i__4);
1424 for (ir1 = 0; ir1 <= i__2; ++ir1) {
1429 i__4 = igl + kbl - 1, i__5 = *n - 1;
1430 i__3 = f2cmin(i__4,i__5);
1431 for (p = igl; p <= i__3; ++p) {
1435 q = isamax_(&i__4, &sva[p], &c__1) + p - 1;
1437 sswap_(m, &a[p * a_dim1 + 1], &c__1, &a[q * a_dim1 +
1440 sswap_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q *
1441 v_dim1 + 1], &c__1);
1453 /* Column norms are periodically updated by explicit */
1454 /* norm computation. */
1456 /* Unfortunately, some BLAS implementations compute SNRM2(M,A(1,p),1) */
1457 /* as SQRT(SDOT(M,A(1,p),1,A(1,p),1)), which may cause the result to */
1458 /* overflow for ||A(:,p)||_2 > SQRT(overflow_threshold), and to */
1459 /* underflow for ||A(:,p)||_2 < SQRT(underflow_threshold). */
1460 /* Hence, SNRM2 cannot be trusted, not even in the case when */
1461 /* the true norm is far from the under(over)flow boundaries. */
1462 /* If properly implemented SNRM2 is available, the IF-THEN-ELSE */
1463 /* below should read "AAPP = SNRM2( M, A(1,p), 1 ) * WORK(p)". */
1465 if (sva[p] < rootbig && sva[p] > rootsfmin) {
1466 sva[p] = snrm2_(m, &a[p * a_dim1 + 1], &c__1) *
1471 slassq_(m, &a[p * a_dim1 + 1], &c__1, &temp1, &
1473 sva[p] = temp1 * sqrt(aapp) * work[p];
1485 i__5 = igl + kbl - 1;
1486 i__4 = f2cmin(i__5,*n);
1487 for (q = p + 1; q <= i__4; ++q) {
1495 rotok = small * aapp <= aaqq;
1496 if (aapp < big / aaqq) {
1497 aapq = sdot_(m, &a[p * a_dim1 + 1], &
1498 c__1, &a[q * a_dim1 + 1], &
1499 c__1) * work[p] * work[q] /
1502 scopy_(m, &a[p * a_dim1 + 1], &c__1, &
1503 work[*n + 1], &c__1);
1504 slascl_("G", &c__0, &c__0, &aapp, &
1505 work[p], m, &c__1, &work[*n +
1507 aapq = sdot_(m, &work[*n + 1], &c__1,
1508 &a[q * a_dim1 + 1], &c__1) *
1512 rotok = aapp <= aaqq / small;
1513 if (aapp > small / aaqq) {
1514 aapq = sdot_(m, &a[p * a_dim1 + 1], &
1515 c__1, &a[q * a_dim1 + 1], &
1516 c__1) * work[p] * work[q] /
1519 scopy_(m, &a[q * a_dim1 + 1], &c__1, &
1520 work[*n + 1], &c__1);
1521 slascl_("G", &c__0, &c__0, &aaqq, &
1522 work[q], m, &c__1, &work[*n +
1524 aapq = sdot_(m, &work[*n + 1], &c__1,
1525 &a[p * a_dim1 + 1], &c__1) *
1531 r__1 = mxaapq, r__2 = abs(aapq);
1532 mxaapq = f2cmax(r__1,r__2);
1534 /* TO rotate or NOT to rotate, THAT is the question ... */
1536 if (abs(aapq) > tol) {
1538 /* [RTD] ROTATED = ROTATED + ONE */
1548 aqoap = aaqq / aapp;
1549 apoaq = aapp / aaqq;
1550 theta = (r__1 = aqoap - apoaq, abs(
1551 r__1)) * -.5f / aapq;
1553 if (abs(theta) > bigtheta) {
1556 fastr[2] = t * work[p] / work[q];
1557 fastr[3] = -t * work[q] / work[p];
1558 srotm_(m, &a[p * a_dim1 + 1], &
1559 c__1, &a[q * a_dim1 + 1],
1562 srotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q *
1563 v_dim1 + 1], &c__1, fastr);
1566 r__1 = 0.f, r__2 = t * apoaq *
1568 sva[q] = aaqq * sqrt((f2cmax(r__1,
1571 r__1 = 0.f, r__2 = 1.f - t *
1573 aapp *= sqrt((f2cmax(r__1,r__2)));
1575 r__1 = mxsinj, r__2 = abs(t);
1576 mxsinj = f2cmax(r__1,r__2);
1581 thsign = -r_sign(&c_b18, &aapq);
1582 t = 1.f / (theta + thsign * sqrt(
1583 theta * theta + 1.f));
1584 cs = sqrt(1.f / (t * t + 1.f));
1588 r__1 = mxsinj, r__2 = abs(sn);
1589 mxsinj = f2cmax(r__1,r__2);
1591 r__1 = 0.f, r__2 = t * apoaq *
1593 sva[q] = aaqq * sqrt((f2cmax(r__1,
1596 r__1 = 0.f, r__2 = 1.f - t *
1598 aapp *= sqrt((f2cmax(r__1,r__2)));
1600 apoaq = work[p] / work[q];
1601 aqoap = work[q] / work[p];
1602 if (work[p] >= 1.f) {
1603 if (work[q] >= 1.f) {
1604 fastr[2] = t * apoaq;
1605 fastr[3] = -t * aqoap;
1608 srotm_(m, &a[p * a_dim1 + 1], &c__1, &a[q *
1609 a_dim1 + 1], &c__1, fastr);
1611 srotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[
1612 q * v_dim1 + 1], &c__1, fastr);
1616 saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1, &a[
1617 p * a_dim1 + 1], &c__1);
1618 r__1 = cs * sn * apoaq;
1619 saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1, &a[
1620 q * a_dim1 + 1], &c__1);
1625 saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1], &
1626 c__1, &v[p * v_dim1 + 1], &c__1);
1627 r__1 = cs * sn * apoaq;
1628 saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1], &
1629 c__1, &v[q * v_dim1 + 1], &c__1);
1633 if (work[q] >= 1.f) {
1635 saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1, &a[
1636 q * a_dim1 + 1], &c__1);
1637 r__1 = -cs * sn * aqoap;
1638 saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1, &a[
1639 p * a_dim1 + 1], &c__1);
1644 saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1], &
1645 c__1, &v[q * v_dim1 + 1], &c__1);
1646 r__1 = -cs * sn * aqoap;
1647 saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1], &
1648 c__1, &v[p * v_dim1 + 1], &c__1);
1651 if (work[p] >= work[q]) {
1653 saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1,
1654 &a[p * a_dim1 + 1], &c__1);
1655 r__1 = cs * sn * apoaq;
1656 saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1,
1657 &a[q * a_dim1 + 1], &c__1);
1662 saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1],
1663 &c__1, &v[p * v_dim1 + 1], &
1665 r__1 = cs * sn * apoaq;
1666 saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1],
1667 &c__1, &v[q * v_dim1 + 1], &
1672 saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1,
1673 &a[q * a_dim1 + 1], &c__1);
1674 r__1 = -cs * sn * aqoap;
1675 saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1,
1676 &a[p * a_dim1 + 1], &c__1);
1681 saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1],
1682 &c__1, &v[q * v_dim1 + 1], &
1684 r__1 = -cs * sn * aqoap;
1685 saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1],
1686 &c__1, &v[p * v_dim1 + 1], &
1695 scopy_(m, &a[p * a_dim1 + 1], &c__1, &
1696 work[*n + 1], &c__1);
1697 slascl_("G", &c__0, &c__0, &aapp, &
1698 c_b18, m, &c__1, &work[*n + 1]
1700 slascl_("G", &c__0, &c__0, &aaqq, &
1701 c_b18, m, &c__1, &a[q *
1702 a_dim1 + 1], lda, &ierr);
1703 temp1 = -aapq * work[p] / work[q];
1704 saxpy_(m, &temp1, &work[*n + 1], &
1705 c__1, &a[q * a_dim1 + 1], &
1707 slascl_("G", &c__0, &c__0, &c_b18, &
1708 aaqq, m, &c__1, &a[q * a_dim1
1711 r__1 = 0.f, r__2 = 1.f - aapq * aapq;
1712 sva[q] = aaqq * sqrt((f2cmax(r__1,r__2)))
1714 mxsinj = f2cmax(mxsinj,sfmin);
1716 /* END IF ROTOK THEN ... ELSE */
1718 /* In the case of cancellation in updating SVA(q), SVA(p) */
1719 /* recompute SVA(q), SVA(p). */
1721 /* Computing 2nd power */
1722 r__1 = sva[q] / aaqq;
1723 if (r__1 * r__1 <= rooteps) {
1724 if (aaqq < rootbig && aaqq >
1726 sva[q] = snrm2_(m, &a[q * a_dim1
1727 + 1], &c__1) * work[q];
1731 slassq_(m, &a[q * a_dim1 + 1], &
1733 sva[q] = t * sqrt(aaqq) * work[q];
1736 if (aapp / aapp0 <= rooteps) {
1737 if (aapp < rootbig && aapp >
1739 aapp = snrm2_(m, &a[p * a_dim1 +
1740 1], &c__1) * work[p];
1744 slassq_(m, &a[p * a_dim1 + 1], &
1746 aapp = t * sqrt(aapp) * work[p];
1752 /* A(:,p) and A(:,q) already numerically orthogonal */
1756 /* [RTD] SKIPPED = SKIPPED + 1 */
1760 /* A(:,q) is zero column */
1767 if (i__ <= swband && pskipped > rowskip) {
1780 /* bailed out of q-loop */
1786 if (ir1 == 0 && aapp == 0.f) {
1788 i__4 = igl + kbl - 1;
1789 notrot = notrot + f2cmin(i__4,*n) - p;
1795 /* end of the p-loop */
1796 /* end of doing the block ( ibr, ibr ) */
1799 /* end of ir1-loop */
1801 /* ... go to the off diagonal blocks */
1803 igl = (ibr - 1) * kbl + 1;
1806 for (jbc = ibr + 1; jbc <= i__2; ++jbc) {
1808 jgl = (jbc - 1) * kbl + 1;
1810 /* doing the block at ( ibr, jbc ) */
1814 i__4 = igl + kbl - 1;
1815 i__3 = f2cmin(i__4,*n);
1816 for (p = igl; p <= i__3; ++p) {
1824 i__5 = jgl + kbl - 1;
1825 i__4 = f2cmin(i__5,*n);
1826 for (q = jgl; q <= i__4; ++q) {
1833 /* Safe Gram matrix computation */
1837 rotok = small * aapp <= aaqq;
1839 rotok = small * aaqq <= aapp;
1841 if (aapp < big / aaqq) {
1842 aapq = sdot_(m, &a[p * a_dim1 + 1], &
1843 c__1, &a[q * a_dim1 + 1], &
1844 c__1) * work[p] * work[q] /
1847 scopy_(m, &a[p * a_dim1 + 1], &c__1, &
1848 work[*n + 1], &c__1);
1849 slascl_("G", &c__0, &c__0, &aapp, &
1850 work[p], m, &c__1, &work[*n +
1852 aapq = sdot_(m, &work[*n + 1], &c__1,
1853 &a[q * a_dim1 + 1], &c__1) *
1858 rotok = aapp <= aaqq / small;
1860 rotok = aaqq <= aapp / small;
1862 if (aapp > small / aaqq) {
1863 aapq = sdot_(m, &a[p * a_dim1 + 1], &
1864 c__1, &a[q * a_dim1 + 1], &
1865 c__1) * work[p] * work[q] /
1868 scopy_(m, &a[q * a_dim1 + 1], &c__1, &
1869 work[*n + 1], &c__1);
1870 slascl_("G", &c__0, &c__0, &aaqq, &
1871 work[q], m, &c__1, &work[*n +
1873 aapq = sdot_(m, &work[*n + 1], &c__1,
1874 &a[p * a_dim1 + 1], &c__1) *
1880 r__1 = mxaapq, r__2 = abs(aapq);
1881 mxaapq = f2cmax(r__1,r__2);
1883 /* TO rotate or NOT to rotate, THAT is the question ... */
1885 if (abs(aapq) > tol) {
1887 /* [RTD] ROTATED = ROTATED + 1 */
1893 aqoap = aaqq / aapp;
1894 apoaq = aapp / aaqq;
1895 theta = (r__1 = aqoap - apoaq, abs(
1896 r__1)) * -.5f / aapq;
1901 if (abs(theta) > bigtheta) {
1903 fastr[2] = t * work[p] / work[q];
1904 fastr[3] = -t * work[q] / work[p];
1905 srotm_(m, &a[p * a_dim1 + 1], &
1906 c__1, &a[q * a_dim1 + 1],
1909 srotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q *
1910 v_dim1 + 1], &c__1, fastr);
1913 r__1 = 0.f, r__2 = t * apoaq *
1915 sva[q] = aaqq * sqrt((f2cmax(r__1,
1918 r__1 = 0.f, r__2 = 1.f - t *
1920 aapp *= sqrt((f2cmax(r__1,r__2)));
1922 r__1 = mxsinj, r__2 = abs(t);
1923 mxsinj = f2cmax(r__1,r__2);
1927 thsign = -r_sign(&c_b18, &aapq);
1931 t = 1.f / (theta + thsign * sqrt(
1932 theta * theta + 1.f));
1933 cs = sqrt(1.f / (t * t + 1.f));
1936 r__1 = mxsinj, r__2 = abs(sn);
1937 mxsinj = f2cmax(r__1,r__2);
1939 r__1 = 0.f, r__2 = t * apoaq *
1941 sva[q] = aaqq * sqrt((f2cmax(r__1,
1944 r__1 = 0.f, r__2 = 1.f - t *
1946 aapp *= sqrt((f2cmax(r__1,r__2)));
1948 apoaq = work[p] / work[q];
1949 aqoap = work[q] / work[p];
1950 if (work[p] >= 1.f) {
1952 if (work[q] >= 1.f) {
1953 fastr[2] = t * apoaq;
1954 fastr[3] = -t * aqoap;
1957 srotm_(m, &a[p * a_dim1 + 1], &c__1, &a[q *
1958 a_dim1 + 1], &c__1, fastr);
1960 srotm_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[
1961 q * v_dim1 + 1], &c__1, fastr);
1965 saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1, &a[
1966 p * a_dim1 + 1], &c__1);
1967 r__1 = cs * sn * apoaq;
1968 saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1, &a[
1969 q * a_dim1 + 1], &c__1);
1972 saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1], &
1973 c__1, &v[p * v_dim1 + 1], &c__1);
1974 r__1 = cs * sn * apoaq;
1975 saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1], &
1976 c__1, &v[q * v_dim1 + 1], &c__1);
1982 if (work[q] >= 1.f) {
1984 saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1, &a[
1985 q * a_dim1 + 1], &c__1);
1986 r__1 = -cs * sn * aqoap;
1987 saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1, &a[
1988 p * a_dim1 + 1], &c__1);
1991 saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1], &
1992 c__1, &v[q * v_dim1 + 1], &c__1);
1993 r__1 = -cs * sn * aqoap;
1994 saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1], &
1995 c__1, &v[p * v_dim1 + 1], &c__1);
2000 if (work[p] >= work[q]) {
2002 saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1,
2003 &a[p * a_dim1 + 1], &c__1);
2004 r__1 = cs * sn * apoaq;
2005 saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1,
2006 &a[q * a_dim1 + 1], &c__1);
2011 saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1],
2012 &c__1, &v[p * v_dim1 + 1], &
2014 r__1 = cs * sn * apoaq;
2015 saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1],
2016 &c__1, &v[q * v_dim1 + 1], &
2021 saxpy_(m, &r__1, &a[p * a_dim1 + 1], &c__1,
2022 &a[q * a_dim1 + 1], &c__1);
2023 r__1 = -cs * sn * aqoap;
2024 saxpy_(m, &r__1, &a[q * a_dim1 + 1], &c__1,
2025 &a[p * a_dim1 + 1], &c__1);
2030 saxpy_(&mvl, &r__1, &v[p * v_dim1 + 1],
2031 &c__1, &v[q * v_dim1 + 1], &
2033 r__1 = -cs * sn * aqoap;
2034 saxpy_(&mvl, &r__1, &v[q * v_dim1 + 1],
2035 &c__1, &v[p * v_dim1 + 1], &
2045 scopy_(m, &a[p * a_dim1 + 1], &
2046 c__1, &work[*n + 1], &
2048 slascl_("G", &c__0, &c__0, &aapp,
2049 &c_b18, m, &c__1, &work[*
2050 n + 1], lda, &ierr);
2051 slascl_("G", &c__0, &c__0, &aaqq,
2052 &c_b18, m, &c__1, &a[q *
2053 a_dim1 + 1], lda, &ierr);
2054 temp1 = -aapq * work[p] / work[q];
2055 saxpy_(m, &temp1, &work[*n + 1], &
2056 c__1, &a[q * a_dim1 + 1],
2058 slascl_("G", &c__0, &c__0, &c_b18,
2059 &aaqq, m, &c__1, &a[q *
2060 a_dim1 + 1], lda, &ierr);
2062 r__1 = 0.f, r__2 = 1.f - aapq *
2064 sva[q] = aaqq * sqrt((f2cmax(r__1,
2066 mxsinj = f2cmax(mxsinj,sfmin);
2068 scopy_(m, &a[q * a_dim1 + 1], &
2069 c__1, &work[*n + 1], &
2071 slascl_("G", &c__0, &c__0, &aaqq,
2072 &c_b18, m, &c__1, &work[*
2073 n + 1], lda, &ierr);
2074 slascl_("G", &c__0, &c__0, &aapp,
2075 &c_b18, m, &c__1, &a[p *
2076 a_dim1 + 1], lda, &ierr);
2077 temp1 = -aapq * work[q] / work[p];
2078 saxpy_(m, &temp1, &work[*n + 1], &
2079 c__1, &a[p * a_dim1 + 1],
2081 slascl_("G", &c__0, &c__0, &c_b18,
2082 &aapp, m, &c__1, &a[p *
2083 a_dim1 + 1], lda, &ierr);
2085 r__1 = 0.f, r__2 = 1.f - aapq *
2087 sva[p] = aapp * sqrt((f2cmax(r__1,
2089 mxsinj = f2cmax(mxsinj,sfmin);
2092 /* END IF ROTOK THEN ... ELSE */
2094 /* In the case of cancellation in updating SVA(q) */
2095 /* Computing 2nd power */
2096 r__1 = sva[q] / aaqq;
2097 if (r__1 * r__1 <= rooteps) {
2098 if (aaqq < rootbig && aaqq >
2100 sva[q] = snrm2_(m, &a[q * a_dim1
2101 + 1], &c__1) * work[q];
2105 slassq_(m, &a[q * a_dim1 + 1], &
2107 sva[q] = t * sqrt(aaqq) * work[q];
2110 /* Computing 2nd power */
2111 r__1 = aapp / aapp0;
2112 if (r__1 * r__1 <= rooteps) {
2113 if (aapp < rootbig && aapp >
2115 aapp = snrm2_(m, &a[p * a_dim1 +
2116 1], &c__1) * work[p];
2120 slassq_(m, &a[p * a_dim1 + 1], &
2122 aapp = t * sqrt(aapp) * work[p];
2126 /* end of OK rotation */
2129 /* [RTD] SKIPPED = SKIPPED + 1 */
2139 if (i__ <= swband && ijblsk >= blskip) {
2144 if (i__ <= swband && pskipped > rowskip) {
2152 /* end of the q-loop */
2161 i__4 = jgl + kbl - 1;
2162 notrot = notrot + f2cmin(i__4,*n) - jgl + 1;
2172 /* end of the p-loop */
2175 /* end of the jbc-loop */
2177 /* 2011 bailed out of the jbc-loop */
2179 i__3 = igl + kbl - 1;
2180 i__2 = f2cmin(i__3,*n);
2181 for (p = igl; p <= i__2; ++p) {
2182 sva[p] = (r__1 = sva[p], abs(r__1));
2188 /* 2000 :: end of the ibr-loop */
2190 if (sva[*n] < rootbig && sva[*n] > rootsfmin) {
2191 sva[*n] = snrm2_(m, &a[*n * a_dim1 + 1], &c__1) * work[*n];
2195 slassq_(m, &a[*n * a_dim1 + 1], &c__1, &t, &aapp);
2196 sva[*n] = t * sqrt(aapp) * work[*n];
2199 /* Additional steering devices */
2201 if (i__ < swband && (mxaapq <= roottol || iswrot <= *n)) {
2205 if (i__ > swband + 1 && mxaapq < sqrt((real) (*n)) * tol && (real) (*
2206 n) * mxaapq * mxsinj < tol) {
2210 if (notrot >= emptsw) {
2216 /* end i=1:NSWEEP loop */
2218 /* #:( Reaching this point means that the procedure has not converged. */
2223 /* #:) Reaching this point means numerical convergence after the i-th */
2227 /* #:) INFO = 0 confirms successful iterations. */
2230 /* Sort the singular values and find how many are above */
2231 /* the underflow threshold. */
2236 for (p = 1; p <= i__1; ++p) {
2238 q = isamax_(&i__2, &sva[p], &c__1) + p - 1;
2246 sswap_(m, &a[p * a_dim1 + 1], &c__1, &a[q * a_dim1 + 1], &c__1);
2248 sswap_(&mvl, &v[p * v_dim1 + 1], &c__1, &v[q * v_dim1 + 1], &
2252 if (sva[p] != 0.f) {
2254 if (sva[p] * skl > sfmin) {
2260 if (sva[*n] != 0.f) {
2262 if (sva[*n] * skl > sfmin) {
2267 /* Normalize the left singular vectors. */
2269 if (lsvec || uctol) {
2271 for (p = 1; p <= i__1; ++p) {
2272 r__1 = work[p] / sva[p];
2273 sscal_(m, &r__1, &a[p * a_dim1 + 1], &c__1);
2278 /* Scale the product of Jacobi rotations (assemble the fast rotations). */
2283 for (p = 1; p <= i__1; ++p) {
2284 sscal_(&mvl, &work[p], &v[p * v_dim1 + 1], &c__1);
2289 for (p = 1; p <= i__1; ++p) {
2290 temp1 = 1.f / snrm2_(&mvl, &v[p * v_dim1 + 1], &c__1);
2291 sscal_(&mvl, &temp1, &v[p * v_dim1 + 1], &c__1);
2297 /* Undo scaling, if necessary (and possible). */
2298 if (skl > 1.f && sva[1] < big / skl || skl < 1.f && sva[f2cmax(n2,1)] >
2301 for (p = 1; p <= i__1; ++p) {
2302 sva[p] = skl * sva[p];
2309 /* The singular values of A are SKL*SVA(1:N). If SKL.NE.ONE */
2310 /* then some of the singular values may overflow or underflow and */
2311 /* the spectrum is given in this factored representation. */
2313 work[2] = (real) n4;
2314 /* N4 is the number of computed nonzero singular values of A. */
2316 work[3] = (real) n2;
2317 /* N2 is the number of singular values of A greater than SFMIN. */
2318 /* If N2<N, SVA(N2:N) contains ZEROS and/or denormalized numbers */
2319 /* that may carry some information. */
2321 work[4] = (real) i__;
2322 /* i is the index of the last sweep before declaring convergence. */
2325 /* MXAAPQ is the largest absolute value of scaled pivots in the */
2329 /* MXSINJ is the largest absolute value of the sines of Jacobi angles */
2330 /* in the last sweep */