/* Core f2c/BLAS scalar typedefs.  The duplicate BLASLONG/BLASULONG and
   blasabs definitions below belong to alternative branches of
   preprocessor conditionals (64- vs 32-bit integer configurations)
   that are not visible in this excerpt — TODO confirm against the
   full header. */
14 typedef long long BLASLONG;
15 typedef unsigned long long BLASULONG;
17 typedef long BLASLONG;
18 typedef unsigned long BLASULONG;
22 typedef BLASLONG blasint;
24 #define blasabs(x) llabs(x)
26 #define blasabs(x) labs(x)
30 #define blasabs(x) abs(x)
33 typedef blasint integer;
35 typedef unsigned int uinteger;
36 typedef char *address;
37 typedef short int shortint;
39 typedef double doublereal;
/* `real` is typedef'd on a line not shown in this excerpt. */
40 typedef struct { real r, i; } complex;
41 typedef struct { doublereal r, i; } doublecomplex;
/* Accessors converting f2c complex structs to native complex values.
   The first pair targets MSVC's _Fcomplex/_Dcomplex, the second pair
   the C99 _Complex types; they sit in alternative branches of an #if
   not visible in this excerpt. */
43 static inline _Fcomplex Cf(complex *z) {_Fcomplex zz={z->r , z->i}; return zz;}
44 static inline _Dcomplex Cd(doublecomplex *z) {_Dcomplex zz={z->r , z->i};return zz;}
/* The pointer casts below assume `complex`/`doublecomplex` share the
   exact memory layout of the native complex types (re, im adjacent) —
   standard for f2c-generated code, but strictly a layout assumption. */
45 static inline _Fcomplex * _pCf(complex *z) {return (_Fcomplex*)z;}
46 static inline _Dcomplex * _pCd(doublecomplex *z) {return (_Dcomplex*)z;}
48 static inline _Complex float Cf(complex *z) {return z->r + z->i*_Complex_I;}
49 static inline _Complex double Cd(doublecomplex *z) {return z->r + z->i*_Complex_I;}
50 static inline _Complex float * _pCf(complex *z) {return (_Complex float*)z;}
51 static inline _Complex double * _pCd(doublecomplex *z) {return (_Complex double*)z;}
/* Writable lvalue views of an f2c complex as the native complex type. */
53 #define pCf(z) (*_pCf(z))
54 #define pCd(z) (*_pCd(z))
56 typedef short int shortlogical;
57 typedef char logical1;
58 typedef char integer1;
/* f2c runtime I/O declarations.  Only fragments are visible here: the
   struct definitions these comments and members belong to span lines
   not shown in this excerpt — consult the full f2c.h before editing. */
63 /* Extern is for use with -E */
74 /*external read, write*/
83 /*internal read, write*/
113 /*rewind, backspace, endfile*/
125 ftnint *inex; /*parameters in standard's order*/
151 union Multitype { /* for multiple entry points */
162 typedef union Multitype Multitype;
164 struct Vardesc { /* for Namelist */
170 typedef struct Vardesc Vardesc;
177 typedef struct Namelist Namelist;
/* Classic f2c arithmetic helper macros.  Arguments may be evaluated
   more than once, so do not pass expressions with side effects. */
179 #define abs(x) ((x) >= 0 ? (x) : -(x))
180 #define dabs(x) (fabs(x))
181 #define f2cmin(a,b) ((a) <= (b) ? (a) : (b))
182 #define f2cmax(a,b) ((a) >= (b) ? (a) : (b))
183 #define dmin(a,b) (f2cmin(a,b))
184 #define dmax(a,b) (f2cmax(a,b))
185 #define bit_test(a,b) ((a) >> (b) & 1)
186 #define bit_clear(a,b) ((a) & ~((uinteger)1 << (b)))
187 #define bit_set(a,b) ((a) | ((uinteger)1 << (b)))
189 #define abort_() { sig_die("Fortran abort routine called", 1); }
190 #define c_abs(z) (cabsf(Cf(z)))
/* Single-precision complex cosine (Fortran c_cos intrinsic).
   Use ccosf to match the single-precision operand produced by Cf():
   the sibling macros already use the f-suffixed calls (c_sin/csinf,
   c_exp/cexpf, c_log/clogf, c_sqrt/csqrtf), and on the _Fcomplex
   branch double-precision ccos would not accept an _Fcomplex value. */
#define c_cos(R,Z) { pCf(R)=ccosf(Cf(Z)); }
/* Complex division for the _Fcomplex/_Dcomplex (MSVC) branch, where no
   arithmetic operators exist for the complex struct types.
   The previous versions were broken in three ways: they divided
   component-wise (which is not complex division), z_div referenced an
   undefined macro `df` instead of `Cd`, and both assigned into the
   temporary struct returned by Cf()/Cd(), so the quotient was
   discarded.  Compute a/b = (a * conj(b)) / |b|^2 into locals first so
   that c may alias a or b, then store through _pCf()/_pCd(). */
#define c_div(c, a, b) do { \
    float c_div_ar = Cf(a)._Val[0], c_div_ai = Cf(a)._Val[1]; \
    float c_div_br = Cf(b)._Val[0], c_div_bi = Cf(b)._Val[1]; \
    float c_div_dn = c_div_br*c_div_br + c_div_bi*c_div_bi; \
    _pCf(c)->_Val[0] = (c_div_ar*c_div_br + c_div_ai*c_div_bi) / c_div_dn; \
    _pCf(c)->_Val[1] = (c_div_ai*c_div_br - c_div_ar*c_div_bi) / c_div_dn; \
} while (0)
#define z_div(c, a, b) do { \
    double z_div_ar = Cd(a)._Val[0], z_div_ai = Cd(a)._Val[1]; \
    double z_div_br = Cd(b)._Val[0], z_div_bi = Cd(b)._Val[1]; \
    double z_div_dn = z_div_br*z_div_br + z_div_bi*z_div_bi; \
    _pCd(c)->_Val[0] = (z_div_ar*z_div_br + z_div_ai*z_div_bi) / z_div_dn; \
    _pCd(c)->_Val[1] = (z_div_ai*z_div_br - z_div_ar*z_div_bi) / z_div_dn; \
} while (0)
/* C99 _Complex branch of complex division, plus the remaining Fortran
   intrinsic wrappers (d_* = double, r_* = float, i_* = integer,
   s_* = string, c_*/z_* = complex).  Pointer arguments follow the f2c
   pass-by-reference convention. */
196 #define c_div(c, a, b) {pCf(c) = Cf(a)/Cf(b);}
197 #define z_div(c, a, b) {pCd(c) = Cd(a)/Cd(b);}
199 #define c_exp(R, Z) {pCf(R) = cexpf(Cf(Z));}
200 #define c_log(R, Z) {pCf(R) = clogf(Cf(Z));}
201 #define c_sin(R, Z) {pCf(R) = csinf(Cf(Z));}
202 //#define c_sqrt(R, Z) {*(R) = csqrtf(Cf(Z));}
203 #define c_sqrt(R, Z) {pCf(R) = csqrtf(Cf(Z));}
204 #define d_abs(x) (fabs(*(x)))
205 #define d_acos(x) (acos(*(x)))
206 #define d_asin(x) (asin(*(x)))
207 #define d_atan(x) (atan(*(x)))
208 #define d_atn2(x, y) (atan2(*(x),*(y)))
209 #define d_cnjg(R, Z) { pCd(R) = conj(Cd(Z)); }
210 #define r_cnjg(R, Z) { pCf(R) = conjf(Cf(Z)); }
211 #define d_cos(x) (cos(*(x)))
212 #define d_cosh(x) (cosh(*(x)))
213 #define d_dim(__a, __b) ( *(__a) > *(__b) ? *(__a) - *(__b) : 0.0 )
214 #define d_exp(x) (exp(*(x)))
215 #define d_imag(z) (cimag(Cd(z)))
216 #define r_imag(z) (cimagf(Cf(z)))
217 #define d_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
/* NOTE(review): r_int/r_lg10 apply the double-precision floor/log to a
   float operand — harmless for accuracy, but not the f-suffixed calls. */
218 #define r_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
219 #define d_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
220 #define r_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
221 #define d_log(x) (log(*(x)))
222 #define d_mod(x, y) (fmod(*(x), *(y)))
/* Round half away from zero (Fortran NINT semantics). */
223 #define u_nint(__x) ((__x)>=0 ? floor((__x) + .5) : -floor(.5 - (__x)))
224 #define d_nint(x) u_nint(*(x))
/* |a| carrying the sign of b (Fortran SIGN intrinsic). */
225 #define u_sign(__a,__b) ((__b) >= 0 ? ((__a) >= 0 ? (__a) : -(__a)) : -((__a) >= 0 ? (__a) : -(__a)))
226 #define d_sign(a,b) u_sign(*(a),*(b))
227 #define r_sign(a,b) u_sign(*(a),*(b))
228 #define d_sin(x) (sin(*(x)))
229 #define d_sinh(x) (sinh(*(x)))
230 #define d_sqrt(x) (sqrt(*(x)))
231 #define d_tan(x) (tan(*(x)))
232 #define d_tanh(x) (tanh(*(x)))
233 #define i_abs(x) abs(*(x))
234 #define i_dnnt(x) ((integer)u_nint(*(x)))
235 #define i_len(s, n) (n)
236 #define i_nint(x) ((integer)u_nint(*(x)))
237 #define i_sign(a,b) ((integer)u_sign((integer)*(a),(integer)*(b)))
238 #define pow_dd(ap, bp) ( pow(*(ap), *(bp)))
239 #define pow_si(B,E) spow_ui(*(B),*(E))
240 #define pow_ri(B,E) spow_ui(*(B),*(E))
241 #define pow_di(B,E) dpow_ui(*(B),*(E))
242 #define pow_zi(p, a, b) {pCd(p) = zpow_ui(Cd(a), *(b));}
243 #define pow_ci(p, a, b) {pCf(p) = cpow_ui(Cf(a), *(b));}
244 #define pow_zz(R,A,B) {pCd(R) = cpow(Cd(A),*(B));}
/* Fortran string concatenation: copy each right-hand piece into lpp,
   then blank-pad the destination out to length llp. */
245 #define s_cat(lpp, rpp, rnp, np, llp) { ftnlen i, nc, ll; char *f__rp, *lp; ll = (llp); lp = (lpp); for(i=0; i < (int)*(np); ++i) { nc = ll; if((rnp)[i] < nc) nc = (rnp)[i]; ll -= nc; f__rp = (rpp)[i]; while(--nc >= 0) *lp++ = *(f__rp)++; } while(--ll >= 0) *lp++ = ' '; }
246 #define s_cmp(a,b,c,d) ((integer)strncmp((a),(b),f2cmin((c),(d))))
/* NOTE(review): unlike the f2c runtime s_copy, this stops at a NUL in B
   and does not blank-pad A to length C — confirm callers only pass
   NUL-terminated sources. */
247 #define s_copy(A,B,C,D) { int __i,__m; for (__i=0, __m=f2cmin((C),(D)); __i<__m && (B)[__i] != 0; ++__i) (A)[__i] = (B)[__i]; }
248 #define sig_die(s, kill) { exit(1); }
249 #define s_stop(s, n) {exit(0);}
250 static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n";
251 #define z_abs(z) (cabs(Cd(z)))
252 #define z_exp(R, Z) {pCd(R) = cexp(Cd(Z));}
253 #define z_sqrt(R, Z) {pCd(R) = csqrt(Cd(Z));}
254 #define myexit_() break;
255 #define mycycle() continue;
/* NOTE(review): myceiling/myhuge/mymaxloc expand with surrounding
   braces, so they are only valid where a brace-wrapped expression can
   appear at the call site — confirm against the generated callers. */
256 #define myceiling(w) {ceil(w)}
257 #define myhuge(w) {HUGE_VAL}
258 //#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);}
259 #define mymaxloc(w,s,e,n) {dmaxloc_(w,*(s),*(e),n)}
261 /* procedure parameter types for -A and -C++ */
263 #define F2C_proc_par_types 1
/* Two alternative declarations of the logical-function-pointer type;
   they belong to a conditional (C++ vs K&R prototypes) whose
   directives are not visible in this excerpt. */
265 typedef logical (*L_fp)(...);
267 typedef logical (*L_fp)();
270 static float spow_ui(float x, integer n) {
271 float pow=1.0; unsigned long int u;
273 if(n < 0) n = -n, x = 1/x;
282 static double dpow_ui(double x, integer n) {
283 double pow=1.0; unsigned long int u;
285 if(n < 0) n = -n, x = 1/x;
295 static _Fcomplex cpow_ui(complex x, integer n) {
296 complex pow={1.0,0.0}; unsigned long int u;
298 if(n < 0) n = -n, x.r = 1/x.r, x.i=1/x.i;
300 if(u & 01) pow.r *= x.r, pow.i *= x.i;
301 if(u >>= 1) x.r *= x.r, x.i *= x.i;
305 _Fcomplex p={pow.r, pow.i};
309 static _Complex float cpow_ui(_Complex float x, integer n) {
310 _Complex float pow=1.0; unsigned long int u;
312 if(n < 0) n = -n, x = 1/x;
323 static _Dcomplex zpow_ui(_Dcomplex x, integer n) {
324 _Dcomplex pow={1.0,0.0}; unsigned long int u;
326 if(n < 0) n = -n, x._Val[0] = 1/x._Val[0], x._Val[1] =1/x._Val[1];
328 if(u & 01) pow._Val[0] *= x._Val[0], pow._Val[1] *= x._Val[1];
329 if(u >>= 1) x._Val[0] *= x._Val[0], x._Val[1] *= x._Val[1];
333 _Dcomplex p = {pow._Val[0], pow._Val[1]};
337 static _Complex double zpow_ui(_Complex double x, integer n) {
338 _Complex double pow=1.0; unsigned long int u;
340 if(n < 0) n = -n, x = 1/x;
350 static integer pow_ii(integer x, integer n) {
351 integer pow; unsigned long int u;
353 if (n == 0 || x == 1) pow = 1;
354 else if (x != -1) pow = x == 0 ? 1/x : 0;
357 if ((n > 0) || !(n == 0 || x == 1 || x != -1)) {
/* 1-based index of the first maximum element of w[s-1 .. e-1]
   (Fortran MAXLOC over a slice).  The strict `>` keeps the earliest
   index on ties.  The trailing *n parameter appears unused in the
   visible body — presumably kept for signature compatibility. */
367 static integer dmaxloc_(double *w, integer s, integer e, integer *n)
369 double m; integer i, mi;
370 for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
371 if (w[i-1]>m) mi=i ,m=w[i-1];
/* Single-precision variant of dmaxloc_. */
374 static integer smaxloc_(float *w, integer s, integer e, integer *n)
376 float m; integer i, mi;
377 for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
378 if (w[i-1]>m) mi=i ,m=w[i-1];
/* Conjugated single-precision complex dot product (BLAS CDOTC):
   accumulates conj(x[i]) * y[i] over n elements with strides
   incx/incy, returning the result through *z.  Two bodies follow, for
   the _Fcomplex and the C99 _Complex branches of this header. */
381 static inline void cdotc_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
382 integer n = *n_, incx = *incx_, incy = *incy_, i;
384 _Fcomplex zdotc = {0.0, 0.0};
385 if (incx == 1 && incy == 1) {
386 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
/* NOTE(review): these _Val-wise products accumulate re*re and im*im
   separately rather than forming full complex products — compare with
   the _Complex branch below and confirm which is intended. */
387 zdotc._Val[0] += conjf(Cf(&x[i]))._Val[0] * Cf(&y[i])._Val[0];
388 zdotc._Val[1] += conjf(Cf(&x[i]))._Val[1] * Cf(&y[i])._Val[1];
391 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
392 zdotc._Val[0] += conjf(Cf(&x[i*incx]))._Val[0] * Cf(&y[i*incy])._Val[0];
393 zdotc._Val[1] += conjf(Cf(&x[i*incx]))._Val[1] * Cf(&y[i*incy])._Val[1];
399 _Complex float zdotc = 0.0;
400 if (incx == 1 && incy == 1) {
401 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
402 zdotc += conjf(Cf(&x[i])) * Cf(&y[i]);
405 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
406 zdotc += conjf(Cf(&x[i*incx])) * Cf(&y[i*incy]);
/* Conjugated double-precision complex dot product (BLAS ZDOTC):
   accumulates conj(x[i]) * y[i] over n elements with strides
   incx/incy, returning the result through *z. */
412 static inline void zdotc_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
413 integer n = *n_, incx = *incx_, incy = *incy_, i;
415 _Dcomplex zdotc = {0.0, 0.0};
416 if (incx == 1 && incy == 1) {
417 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
/* NOTE(review): _Val-wise products accumulate re*re and im*im
   separately, not full complex products — compare with the _Complex
   branch below and confirm which is intended. */
418 zdotc._Val[0] += conj(Cd(&x[i]))._Val[0] * Cd(&y[i])._Val[0];
419 zdotc._Val[1] += conj(Cd(&x[i]))._Val[1] * Cd(&y[i])._Val[1];
422 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
423 zdotc._Val[0] += conj(Cd(&x[i*incx]))._Val[0] * Cd(&y[i*incy])._Val[0];
424 zdotc._Val[1] += conj(Cd(&x[i*incx]))._Val[1] * Cd(&y[i*incy])._Val[1];
430 _Complex double zdotc = 0.0;
431 if (incx == 1 && incy == 1) {
432 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
433 zdotc += conj(Cd(&x[i])) * Cd(&y[i]);
436 for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
437 zdotc += conj(Cd(&x[i*incx])) * Cd(&y[i*incy]);
/* Unconjugated single-precision complex dot product (BLAS CDOTU):
   accumulates x[i] * y[i] (no conjugation) over n elements with
   strides incx/incy, returning the result through *z. */
443 static inline void cdotu_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
444 integer n = *n_, incx = *incx_, incy = *incy_, i;
446 _Fcomplex zdotc = {0.0, 0.0};
447 if (incx == 1 && incy == 1) {
448 for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
/* NOTE(review): _Val-wise products, not full complex multiplication —
   compare with the _Complex branch below. */
449 zdotc._Val[0] += Cf(&x[i])._Val[0] * Cf(&y[i])._Val[0];
450 zdotc._Val[1] += Cf(&x[i])._Val[1] * Cf(&y[i])._Val[1];
453 for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
454 zdotc._Val[0] += Cf(&x[i*incx])._Val[0] * Cf(&y[i*incy])._Val[0];
455 zdotc._Val[1] += Cf(&x[i*incx])._Val[1] * Cf(&y[i*incy])._Val[1];
461 _Complex float zdotc = 0.0;
462 if (incx == 1 && incy == 1) {
463 for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
464 zdotc += Cf(&x[i]) * Cf(&y[i]);
467 for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
468 zdotc += Cf(&x[i*incx]) * Cf(&y[i*incy]);
/* Unconjugated double-precision complex dot product (BLAS ZDOTU):
   accumulates x[i] * y[i] (no conjugation) over n elements with
   strides incx/incy, returning the result through *z. */
474 static inline void zdotu_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
475 integer n = *n_, incx = *incx_, incy = *incy_, i;
477 _Dcomplex zdotc = {0.0, 0.0};
478 if (incx == 1 && incy == 1) {
479 for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
/* NOTE(review): _Val-wise products, not full complex multiplication —
   compare with the _Complex branch below. */
480 zdotc._Val[0] += Cd(&x[i])._Val[0] * Cd(&y[i])._Val[0];
481 zdotc._Val[1] += Cd(&x[i])._Val[1] * Cd(&y[i])._Val[1];
484 for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
485 zdotc._Val[0] += Cd(&x[i*incx])._Val[0] * Cd(&y[i*incy])._Val[0];
486 zdotc._Val[1] += Cd(&x[i*incx])._Val[1] * Cd(&y[i*incy])._Val[1];
492 _Complex double zdotc = 0.0;
493 if (incx == 1 && incy == 1) {
494 for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
495 zdotc += Cd(&x[i]) * Cd(&y[i]);
498 for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
499 zdotc += Cd(&x[i*incx]) * Cd(&y[i*incy]);
505 /* -- translated by f2c (version 20000121).
506 You must link the resulting object file with the libraries:
507 -lf2c -lm (in that order)
513 /* Table of constant values */
/* INTEGER constant 1, passed by reference to BLAS/LAPACK calls. */
515 static integer c__1 = 1;
517 /* > \brief \b DLARRF finds a new relatively robust representation such that at least one of the eigenvalues i
518 s relatively isolated. */
520 /* =========== DOCUMENTATION =========== */
522 /* Online html documentation available at */
523 /* http://www.netlib.org/lapack/explore-html/ */
526 /* > Download DLARRF + dependencies */
527 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/dlarrf.
530 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/dlarrf.
533 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/dlarrf.
541 /* SUBROUTINE DLARRF( N, D, L, LD, CLSTRT, CLEND, */
543 /* SPDIAM, CLGAPL, CLGAPR, PIVMIN, SIGMA, */
544 /* DPLUS, LPLUS, WORK, INFO ) */
546 /* INTEGER CLSTRT, CLEND, INFO, N */
547 /* DOUBLE PRECISION CLGAPL, CLGAPR, PIVMIN, SIGMA, SPDIAM */
548 /* DOUBLE PRECISION D( * ), DPLUS( * ), L( * ), LD( * ), */
549 /* $ LPLUS( * ), W( * ), WGAP( * ), WERR( * ), WORK( * ) */
552 /* > \par Purpose: */
557 /* > Given the initial representation L D L^T and its cluster of close */
558 /* > eigenvalues (in a relative measure), W( CLSTRT ), W( CLSTRT+1 ), ... */
559 /* > W( CLEND ), DLARRF finds a new relatively robust representation */
560 /* > L D L^T - SIGMA I = L(+) D(+) L(+)^T such that at least one of the */
561 /* > eigenvalues of L(+) D(+) L(+)^T is relatively isolated. */
570 /* > The order of the matrix (subblock, if the matrix split). */
575 /* > D is DOUBLE PRECISION array, dimension (N) */
576 /* > The N diagonal elements of the diagonal matrix D. */
581 /* > L is DOUBLE PRECISION array, dimension (N-1) */
582 /* > The (N-1) subdiagonal elements of the unit bidiagonal */
586 /* > \param[in] LD */
588 /* > LD is DOUBLE PRECISION array, dimension (N-1) */
589 /* > The (N-1) elements L(i)*D(i). */
592 /* > \param[in] CLSTRT */
594 /* > CLSTRT is INTEGER */
595 /* > The index of the first eigenvalue in the cluster. */
598 /* > \param[in] CLEND */
600 /* > CLEND is INTEGER */
601 /* > The index of the last eigenvalue in the cluster. */
606 /* > W is DOUBLE PRECISION array, dimension */
607 /* > dimension is >= (CLEND-CLSTRT+1) */
608 /* > The eigenvalue APPROXIMATIONS of L D L^T in ascending order. */
609 /* > W( CLSTRT ) through W( CLEND ) form the cluster of relatively */
610 /* > close eigenvalues. */
613 /* > \param[in,out] WGAP */
615 /* > WGAP is DOUBLE PRECISION array, dimension */
616 /* > dimension is >= (CLEND-CLSTRT+1) */
617 /* > The separation from the right neighbor eigenvalue in W. */
620 /* > \param[in] WERR */
622 /* > WERR is DOUBLE PRECISION array, dimension */
623 /* > dimension is >= (CLEND-CLSTRT+1) */
624 /* > WERR contains the semiwidth of the uncertainty */
625 /* > interval of the corresponding eigenvalue APPROXIMATION in W */
628 /* > \param[in] SPDIAM */
630 /* > SPDIAM is DOUBLE PRECISION */
631 /* > estimate of the spectral diameter obtained from the */
632 /* > Gerschgorin intervals */
635 /* > \param[in] CLGAPL */
637 /* > CLGAPL is DOUBLE PRECISION */
640 /* > \param[in] CLGAPR */
642 /* > CLGAPR is DOUBLE PRECISION */
643 /* > absolute gap on each end of the cluster. */
644 /* > Set by the calling routine to protect against shifts too close */
645 /* > to eigenvalues outside the cluster. */
648 /* > \param[in] PIVMIN */
650 /* > PIVMIN is DOUBLE PRECISION */
651 /* > The minimum pivot allowed in the Sturm sequence. */
654 /* > \param[out] SIGMA */
656 /* > SIGMA is DOUBLE PRECISION */
657 /* > The shift used to form L(+) D(+) L(+)^T. */
660 /* > \param[out] DPLUS */
662 /* > DPLUS is DOUBLE PRECISION array, dimension (N) */
663 /* > The N diagonal elements of the diagonal matrix D(+). */
666 /* > \param[out] LPLUS */
668 /* > LPLUS is DOUBLE PRECISION array, dimension (N-1) */
669 /* > The first (N-1) elements of LPLUS contain the subdiagonal */
670 /* > elements of the unit bidiagonal matrix L(+). */
673 /* > \param[out] WORK */
675 /* > WORK is DOUBLE PRECISION array, dimension (2*N) */
679 /* > \param[out] INFO */
681 /* > INFO is INTEGER */
682 /* > Signals processing OK (=0) or failure (=1) */
688 /* > \author Univ. of Tennessee */
689 /* > \author Univ. of California Berkeley */
690 /* > \author Univ. of Colorado Denver */
691 /* > \author NAG Ltd. */
693 /* > \date June 2016 */
695 /* > \ingroup OTHERauxiliary */
697 /* > \par Contributors: */
698 /* ================== */
700 /* > Beresford Parlett, University of California, Berkeley, USA \n */
701 /* > Jim Demmel, University of California, Berkeley, USA \n */
702 /* > Inderjit Dhillon, University of Texas, Austin, USA \n */
703 /* > Osni Marques, LBNL/NERSC, USA \n */
704 /* > Christof Voemel, University of California, Berkeley, USA */
706 /* ===================================================================== */
/* DLARRF: given the factorization L D L^T and a cluster of close
   eigenvalues W(CLSTRT..CLEND), choose a shift SIGMA just outside the
   cluster and form L D L^T - SIGMA I = L(+) D(+) L(+)^T so that at
   least one eigenvalue of the new representation is relatively
   isolated.  See the doxygen block above for the full parameter
   contract.  Outputs: SIGMA, DPLUS, LPLUS, INFO (0 = ok, 1 = failure). */
707 /* Subroutine */ int dlarrf_(integer *n, doublereal *d__, doublereal *l,
708 doublereal *ld, integer *clstrt, integer *clend, doublereal *w,
709 doublereal *wgap, doublereal *werr, doublereal *spdiam, doublereal *
710 clgapl, doublereal *clgapr, doublereal *pivmin, doublereal *sigma,
711 doublereal *dplus, doublereal *lplus, doublereal *work, integer *info)
713 /* System generated locals */
715 doublereal d__1, d__2, d__3;
717 /* Local variables */
718 doublereal growthbound, fail, fact, oldp;
724 doublereal s, avgap, ldmax, rdmax;
726 extern /* Subroutine */ int dcopy_(integer *, doublereal *, integer *,
727 doublereal *, integer *);
728 doublereal bestshift, smlgrowth;
730 extern doublereal dlamch_(char *);
733 doublereal mingap, lsigma, rdelta;
734 extern logical disnan_(doublereal *);
736 doublereal rsigma, clwdth;
737 logical sawnan1, sawnan2;
740 doublereal max1, max2, rrr1, rrr2, znm2;
743 /* -- LAPACK auxiliary routine (version 3.7.1) -- */
744 /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
745 /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
749 /* ===================================================================== */
752 /* Parameter adjustments */
766 /* Quick return if possible */
773 eps = dlamch_("Precision");
776 /* Note that we cannot guarantee that for any of the shifts tried, */
777 /* the factorization has a small or even moderate element growth. */
778 /* There could be Ritz values at both ends of the cluster and despite */
779 /* backing off, there are examples where all factorizations tried */
780 /* (in IEEE mode, allowing zero pivots & infinities) have INFINITE */
781 /* element growth. */
782 /* For this reason, we should use PIVMIN in this subroutine so that at */
783 /* least the L D L^T factorization exists. It can be checked afterwards */
784 /* whether the element growth caused bad residuals/orthogonality. */
785 /* Decide whether the code should accept the best among all */
786 /* representations despite large element growth or signal INFO=1 */
787 /* Setting NOFAIL to .FALSE. for quick fix for bug 113 */
790 /* Compute the average gap length of the cluster */
791 clwdth = (d__1 = w[*clend] - w[*clstrt], abs(d__1)) + werr[*clend] + werr[
793 avgap = clwdth / (doublereal) (*clend - *clstrt);
794 mingap = f2cmin(*clgapl,*clgapr);
795 /* Initial values for shifts to both ends of cluster */
797 d__1 = w[*clstrt], d__2 = w[*clend];
798 lsigma = f2cmin(d__1,d__2) - werr[*clstrt];
800 d__1 = w[*clstrt], d__2 = w[*clend];
801 rsigma = f2cmax(d__1,d__2) + werr[*clend];
802 /* Use a small fudge to make sure that we really shift to the outside */
803 lsigma -= abs(lsigma) * 4. * eps;
804 rsigma += abs(rsigma) * 4. * eps;
805 /* Compute upper bounds for how much to back off the initial shifts */
806 ldmax = mingap * .25 + *pivmin * 2.;
807 rdmax = mingap * .25 + *pivmin * 2.;
809 d__1 = avgap, d__2 = wgap[*clstrt];
810 ldelta = f2cmax(d__1,d__2) / fact;
812 d__1 = avgap, d__2 = wgap[*clend - 1];
813 rdelta = f2cmax(d__1,d__2) / fact;
815 /* Initialize the record of the best representation found */
819 fail = (doublereal) (*n - 1) * mingap / (*spdiam * eps);
820 fail2 = (doublereal) (*n - 1) * mingap / (*spdiam * sqrt(eps));
823 /* while (KTRY <= KTRYMAX) */
825 growthbound = *spdiam * 8.;
829 /* Ensure that we do not back off too much of the initial shifts */
830 ldelta = f2cmin(ldmax,ldelta);
831 rdelta = f2cmin(rdmax,rdelta);
832 /* Compute the element growth when shifting to both ends of the cluster */
833 /* accept the shift if there is no element growth at one of the two ends */
836 dplus[1] = d__[1] + s;
/* Clamp near-zero pivots to -PIVMIN so the factorization always exists. */
837 if (abs(dplus[1]) < *pivmin) {
838 dplus[1] = -(*pivmin);
839 /* Need to set SAWNAN1 because refined RRR test should not be used */
843 max1 = abs(dplus[1]);
845 for (i__ = 1; i__ <= i__1; ++i__) {
846 lplus[i__] = ld[i__] / dplus[i__];
847 s = s * lplus[i__] * l[i__] - lsigma;
848 dplus[i__ + 1] = d__[i__ + 1] + s;
849 if ((d__1 = dplus[i__ + 1], abs(d__1)) < *pivmin) {
850 dplus[i__ + 1] = -(*pivmin);
851 /* Need to set SAWNAN1 because refined RRR test should not be used */
856 d__2 = max1, d__3 = (d__1 = dplus[i__ + 1], abs(d__1));
857 max1 = f2cmax(d__2,d__3);
860 sawnan1 = sawnan1 || disnan_(&max1);
/* && binds tighter than ||: accept when FORCER, or when the growth is
   within bound and no NaN was seen (matches the Fortran source's
   FORCER .OR. (MAX1.LE.GROWTHBOUND .AND. .NOT.SAWNAN1)). */
861 if (forcer || max1 <= growthbound && ! sawnan1) {
868 work[1] = d__[1] + s;
869 if (abs(work[1]) < *pivmin) {
870 work[1] = -(*pivmin);
871 /* Need to set SAWNAN2 because refined RRR test should not be used */
877 for (i__ = 1; i__ <= i__1; ++i__) {
878 work[*n + i__] = ld[i__] / work[i__];
879 s = s * work[*n + i__] * l[i__] - rsigma;
880 work[i__ + 1] = d__[i__ + 1] + s;
881 if ((d__1 = work[i__ + 1], abs(d__1)) < *pivmin) {
882 work[i__ + 1] = -(*pivmin);
883 /* Need to set SAWNAN2 because refined RRR test should not be used */
888 d__2 = max2, d__3 = (d__1 = work[i__ + 1], abs(d__1));
889 max2 = f2cmax(d__2,d__3);
892 sawnan2 = sawnan2 || disnan_(&max2);
/* Same precedence as above: FORCER .OR. (bounded growth .AND. no NaN). */
893 if (forcer || max2 <= growthbound && ! sawnan2) {
898 /* If we are at this point, both shifts led to too much element growth */
899 /* Record the better of the two shifts (provided it didn't lead to NaN) */
900 if (sawnan1 && sawnan2) {
901 /* both MAX1 and MAX2 are NaN */
906 if (max1 <= smlgrowth) {
912 if (sawnan1 || max2 <= max1) {
915 if (max2 <= smlgrowth) {
921 /* If we are here, both the left and the right shift led to */
922 /* element growth. If the element growth is moderate, then */
923 /* we may still accept the representation, if it passes a */
924 /* refined test for RRR. This test supposes that no NaN occurred. */
925 /* Moreover, we use the refined RRR test only for isolated clusters. */
926 if (clwdth < mingap / 128. && f2cmin(max1,max2) < fail2 && ! sawnan1 && !
933 if (tryrrr1 && dorrr1) {
935 tmp = (d__1 = dplus[*n], abs(d__1));
939 for (i__ = *n - 1; i__ >= 1; --i__) {
941 prod = dplus[i__ + 1] * work[*n + i__ + 1] / (dplus[i__] *
942 work[*n + i__]) * oldp;
944 prod *= (d__1 = work[*n + i__], abs(d__1));
947 /* Computing 2nd power */
951 d__2 = tmp, d__3 = (d__1 = dplus[i__] * prod, abs(d__1));
952 tmp = f2cmax(d__2,d__3);
955 rrr1 = tmp / (*spdiam * sqrt(znm2));
961 } else if (indx == 2) {
962 tmp = (d__1 = work[*n], abs(d__1));
966 for (i__ = *n - 1; i__ >= 1; --i__) {
968 prod = work[i__ + 1] * lplus[i__ + 1] / (work[i__] *
971 prod *= (d__1 = lplus[i__], abs(d__1));
974 /* Computing 2nd power */
978 d__2 = tmp, d__3 = (d__1 = work[i__] * prod, abs(d__1));
979 tmp = f2cmax(d__2,d__3);
982 rrr2 = tmp / (*spdiam * sqrt(znm2));
992 /* If we are here, both shifts failed also the RRR test. */
993 /* Back off to the outside */
995 d__1 = lsigma - ldelta, d__2 = lsigma - ldmax;
996 lsigma = f2cmax(d__1,d__2);
998 d__1 = rsigma + rdelta, d__2 = rsigma + rdmax;
999 rsigma = f2cmin(d__1,d__2);
1005 /* None of the representations investigated satisfied our */
1006 /* criteria. Take the best one we found. */
1007 if (smlgrowth < fail || nofail) {
1019 } else if (shift == 2) {
1020 /* store new L and D back into DPLUS, LPLUS */
1021 dcopy_(n, &work[1], &c__1, &dplus[1], &c__1);
1023 dcopy_(&i__1, &work[*n + 1], &c__1, &lplus[1], &c__1);