/* Integer and complex typedefs for the BLAS/f2c interface.
 * NOTE(review): both the 64-bit (long long) and long-based variants of
 * BLASLONG/BLASULONG, and three blasabs definitions, appear below; in the
 * full header they are alternative branches of stripped preprocessor
 * conditionals (INTERFACE64 etc.) -- confirm against the complete file. */
typedef long long BLASLONG;
typedef unsigned long long BLASULONG;
typedef long BLASLONG;
typedef unsigned long BLASULONG;
/* blasint: the integer type used across the BLAS/LAPACK interface. */
typedef BLASLONG blasint;
#define blasabs(x) llabs(x)
#define blasabs(x) labs(x)
#define blasabs(x) abs(x)
/* f2c compatibility typedefs: Fortran types mapped onto C types. */
typedef blasint integer;
typedef unsigned int uinteger;
typedef char *address;
typedef short int shortint;
typedef double doublereal;
typedef struct { real r, i; } complex;             /* Fortran COMPLEX */
typedef struct { doublereal r, i; } doublecomplex; /* Fortran COMPLEX*16 */
/* Accessors bridging the f2c complex structs to native complex types.
 * NOTE(review): both the MSVC (_Fcomplex/_Dcomplex) and the C99
 * (_Complex float/double) variants appear here; in the full header they
 * are alternative branches of a stripped compiler-detection #ifdef, so
 * only one set is compiled. */
static inline _Fcomplex Cf(complex *z) {_Fcomplex zz={z->r , z->i}; return zz;}
static inline _Dcomplex Cd(doublecomplex *z) {_Dcomplex zz={z->r , z->i};return zz;}
static inline _Fcomplex * _pCf(complex *z) {return (_Fcomplex*)z;}
static inline _Dcomplex * _pCd(doublecomplex *z) {return (_Dcomplex*)z;}
static inline _Complex float Cf(complex *z) {return z->r + z->i*_Complex_I;}
static inline _Complex double Cd(doublecomplex *z) {return z->r + z->i*_Complex_I;}
static inline _Complex float * _pCf(complex *z) {return (_Complex float*)z;}
static inline _Complex double * _pCd(doublecomplex *z) {return (_Complex double*)z;}
/* Writable lvalue views of a complex/doublecomplex object (for assignment). */
#define pCf(z) (*_pCf(z))
#define pCd(z) (*_pCd(z))
typedef short int shortlogical;
typedef char logical1;
typedef char integer1;
/* Extern is for use with -E */
/* f2c I/O support declarations (external/internal read-write, rewind,
 * backspace, endfile, Namelist).  NOTE(review): most member lines of
 * these structs/unions are elided in this excerpt. */
/*external read, write*/
/*internal read, write*/
/*rewind, backspace, endfile*/
ftnint *inex;	/*parameters in standard's order*/
union Multitype {	/* for multiple entry points */
typedef union Multitype Multitype;
struct Vardesc {	/* for Namelist */
typedef struct Vardesc Vardesc;
typedef struct Namelist Namelist;
/* Fortran intrinsic emulation (f2c runtime).  These are classic textual
 * macros: arguments may be evaluated more than once, so callers must not
 * pass expressions with side effects. */
#define abs(x) ((x) >= 0 ? (x) : -(x))
#define dabs(x) (fabs(x))
#define f2cmin(a,b) ((a) <= (b) ? (a) : (b))
#define f2cmax(a,b) ((a) >= (b) ? (a) : (b))
#define dmin(a,b) (f2cmin(a,b))
#define dmax(a,b) (f2cmax(a,b))
#define bit_test(a,b)	((a) >> (b) & 1)
#define bit_clear(a,b)	((a) & ~((uinteger)1 << (b)))
#define bit_set(a,b)	((a) |	((uinteger)1 << (b)))
/* abort_: Fortran ABORT -- delegates to sig_die (which here just exits). */
#define abort_() { sig_die("Fortran abort routine called", 1); }
#define c_abs(z) (cabsf(Cf(z)))
/* c_cos: single-precision complex cosine, R = cos(Z).
 * Fixed to use the C99 single-precision ccosf, consistent with the
 * sibling macros c_sin/c_exp/c_log/c_sqrt (csinf/cexpf/clogf/csqrtf);
 * the original called the double-precision ccos, which both loses the
 * intended single-precision semantics and does not type-match the
 * _Fcomplex branch of pCf/Cf. */
#define c_cos(R,Z) { pCf(R)=ccosf(Cf(Z)); }
/* c_div: single-precision complex division, *c = *a / *b (f2c runtime,
 * non-C99 branch where native _Complex arithmetic is unavailable).
 * The original expanded to `Cf(c)._Val[0] = ...`, which assigns into a
 * function return value (not an lvalue, a constraint violation), and it
 * divided real and imaginary parts independently, which is not complex
 * division.  Rewritten to the standard formula
 *   (ar*br + ai*bi)/(br^2 + bi^2) + i*(ai*br - ar*bi)/(br^2 + bi^2),
 * writing through the destination pointer; the temporaries also make the
 * macro safe when c aliases a or b.  Plain-brace statement form kept to
 * match the sibling macros in this header; arguments are still evaluated
 * more than once, so avoid side effects. */
#define c_div(c, a, b) {float _cd_br = (b)->r, _cd_bi = (b)->i, _cd_d = _cd_br*_cd_br + _cd_bi*_cd_bi, _cd_ar = (a)->r, _cd_ai = (a)->i; (c)->r = (_cd_ar*_cd_br + _cd_ai*_cd_bi)/_cd_d; (c)->i = (_cd_ai*_cd_br - _cd_ar*_cd_bi)/_cd_d;}
/* z_div: double-precision complex division, *c = *a / *b (f2c runtime,
 * non-C99 branch).  The original had three defects: the `df(b)` typo
 * (undefined identifier; `Cd(b)` was meant) which is a guaranteed compile
 * error, assignment into `Cd(c)._Val[0]` (a function return value, not an
 * lvalue), and component-wise division instead of complex division.
 * Rewritten to the standard formula, writing through the destination
 * pointer; the temporaries make it safe when c aliases a or b.
 * Plain-brace statement form kept to match the sibling macros; arguments
 * are evaluated more than once, so avoid side effects. */
#define z_div(c, a, b) {double _zd_br = (b)->r, _zd_bi = (b)->i, _zd_d = _zd_br*_zd_br + _zd_bi*_zd_bi, _zd_ar = (a)->r, _zd_ai = (a)->i; (c)->r = (_zd_ar*_zd_br + _zd_ai*_zd_bi)/_zd_d; (c)->i = (_zd_ai*_zd_br - _zd_ar*_zd_bi)/_zd_d;}
/* C99-branch complex division and single-precision complex intrinsics
 * (native _Complex arithmetic; alternative to the struct-based branch). */
#define c_div(c, a, b) {pCf(c) = Cf(a)/Cf(b);}
#define z_div(c, a, b) {pCd(c) = Cd(a)/Cd(b);}
#define c_exp(R, Z) {pCf(R) = cexpf(Cf(Z));}
#define c_log(R, Z) {pCf(R) = clogf(Cf(Z));}
#define c_sin(R, Z) {pCf(R) = csinf(Cf(Z));}
//#define c_sqrt(R, Z) {*(R) = csqrtf(Cf(Z));}
#define c_sqrt(R, Z) {pCf(R) = csqrtf(Cf(Z));}
/* f2c runtime intrinsics for REAL/DOUBLE PRECISION, integers and strings.
 * All take Fortran-style pointer arguments; being textual macros, their
 * arguments may be evaluated more than once. */
#define d_abs(x) (fabs(*(x)))
#define d_acos(x) (acos(*(x)))
#define d_asin(x) (asin(*(x)))
#define d_atan(x) (atan(*(x)))
#define d_atn2(x, y) (atan2(*(x),*(y)))
#define d_cnjg(R, Z) { pCd(R) = conj(Cd(Z)); }
#define r_cnjg(R, Z) { pCf(R) = conjf(Cf(Z)); }
#define d_cos(x) (cos(*(x)))
#define d_cosh(x) (cosh(*(x)))
#define d_dim(__a, __b) ( *(__a) > *(__b) ? *(__a) - *(__b) : 0.0 )
#define d_exp(x) (exp(*(x)))
#define d_imag(z) (cimag(Cd(z)))
#define r_imag(z) (cimagf(Cf(z)))
#define d_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
#define r_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
/* 0.43429448190325182765 == log10(e): base-10 log via natural log. */
#define d_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
#define r_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
#define d_log(x) (log(*(x)))
#define d_mod(x, y) (fmod(*(x), *(y)))
/* u_nint: round half away from zero (Fortran NINT semantics). */
#define u_nint(__x) ((__x)>=0 ? floor((__x) + .5) : -floor(.5 - (__x)))
#define d_nint(x) u_nint(*(x))
/* u_sign: |a| carrying the sign of b (Fortran SIGN). */
#define u_sign(__a,__b) ((__b) >= 0 ? ((__a) >= 0 ? (__a) : -(__a)) : -((__a) >= 0 ? (__a) : -(__a)))
#define d_sign(a,b) u_sign(*(a),*(b))
#define r_sign(a,b) u_sign(*(a),*(b))
#define d_sin(x) (sin(*(x)))
#define d_sinh(x) (sinh(*(x)))
#define d_sqrt(x) (sqrt(*(x)))
#define d_tan(x) (tan(*(x)))
#define d_tanh(x) (tanh(*(x)))
#define i_abs(x) abs(*(x))
#define i_dnnt(x) ((integer)u_nint(*(x)))
#define i_len(s, n) (n)
#define i_nint(x) ((integer)u_nint(*(x)))
#define i_sign(a,b) ((integer)u_sign((integer)*(a),(integer)*(b)))
#define pow_dd(ap, bp) ( pow(*(ap), *(bp)))
#define pow_si(B,E) spow_ui(*(B),*(E))
#define pow_ri(B,E) spow_ui(*(B),*(E))
#define pow_di(B,E) dpow_ui(*(B),*(E))
#define pow_zi(p, a, b) {pCd(p) = zpow_ui(Cd(a), *(b));}
#define pow_ci(p, a, b) {pCf(p) = cpow_ui(Cf(a), *(b));}
#define pow_zz(R,A,B) {pCd(R) = cpow(Cd(A),*(B));}
/* s_cat: Fortran string concatenation of np pieces into lpp,
 * blank-padded to total length llp. */
#define s_cat(lpp, rpp, rnp, np, llp) { ftnlen i, nc, ll; char *f__rp, *lp; ll = (llp); lp = (lpp); for(i=0; i < (int)*(np); ++i) { nc = ll; if((rnp)[i] < nc) nc = (rnp)[i]; ll -= nc; f__rp = (rpp)[i]; while(--nc >= 0) *lp++ = *(f__rp)++; } while(--ll >= 0) *lp++ = ' '; }
#define s_cmp(a,b,c,d) ((integer)strncmp((a),(b),f2cmin((c),(d))))
/* NOTE(review): s_copy copies at most f2cmin(C,D) chars and stops at a
 * NUL; unlike Fortran assignment it does not blank-pad A -- confirm
 * callers expect this. */
#define s_copy(A,B,C,D) { int __i,__m; for (__i=0, __m=f2cmin((C),(D)); __i<__m && (B)[__i] != 0; ++__i) (A)[__i] = (B)[__i]; }
/* NOTE(review): this sig_die discards the message and kill flag and just
 * exits; libF77's version prints s to stderr -- presumably an intentional
 * simplification here. */
#define sig_die(s, kill) { exit(1); }
#define s_stop(s, n) {exit(0);}
/* SCCS what-string identifying the embedded libF77 version. */
static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n";
#define z_abs(z) (cabs(Cd(z)))
#define z_exp(R, Z) {pCd(R) = cexp(Cd(Z));}
#define z_sqrt(R, Z) {pCd(R) = csqrt(Cd(Z));}
/* Loop-control shims for translated Fortran EXIT/CYCLE. */
#define myexit_() break;
#define mycycle() continue;
/* NOTE(review): myceiling/myhuge expand to brace blocks, not
 * expressions -- verify they are only used where a block is legal. */
#define myceiling(w) {ceil(w)}
#define myhuge(w) {HUGE_VAL}
//#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);}
#define mymaxloc(w,s,e,n) {dmaxloc_(w,*(s),*(e),n)}
/* procedure parameter types for -A and -C++ */
#define F2C_proc_par_types 1
/* NOTE(review): the (...) variant is the C++ branch and the () variant
 * the C branch of a stripped #ifdef -- only one is compiled. */
typedef logical (*L_fp)(...);
typedef logical (*L_fp)();
/* spow_ui / dpow_ui: x**n for integer n by repeated squaring; a negative
 * exponent is mapped to (1/x)**|n| up front.  NOTE(review): the squaring
 * loops and return statements are elided in this excerpt; annotation only. */
static float spow_ui(float x, integer n) {
  float pow=1.0; unsigned long int u;
  if(n < 0) n = -n, x = 1/x;
static double dpow_ui(double x, integer n) {
  double pow=1.0; unsigned long int u;
  if(n < 0) n = -n, x = 1/x;
/* cpow_ui / zpow_ui: complex x**n for integer n by repeated squaring.
 * Both a struct-based (MSVC _Fcomplex/_Dcomplex) and a C99 _Complex
 * variant of each appear -- alternative branches of a stripped #ifdef.
 * NOTE(review): in the struct variants the multiply/reciprocal steps act
 * on the real and imaginary components independently (e.g.
 * x.r *= x.r, x.i *= x.i), which is not complex multiplication; several
 * lines are also elided in this excerpt -- confirm against the full file. */
static _Fcomplex cpow_ui(complex x, integer n) {
  complex pow={1.0,0.0}; unsigned long int u;
  if(n < 0) n = -n, x.r = 1/x.r, x.i=1/x.i;
    if(u & 01) pow.r *= x.r, pow.i *= x.i;
    if(u >>= 1) x.r *= x.r, x.i *= x.i;
  _Fcomplex p={pow.r, pow.i};
static _Complex float cpow_ui(_Complex float x, integer n) {
  _Complex float pow=1.0; unsigned long int u;
  if(n < 0) n = -n, x = 1/x;
static _Dcomplex zpow_ui(_Dcomplex x, integer n) {
  _Dcomplex pow={1.0,0.0}; unsigned long int u;
  if(n < 0) n = -n, x._Val[0] = 1/x._Val[0], x._Val[1] =1/x._Val[1];
    if(u & 01) pow._Val[0] *= x._Val[0], pow._Val[1] *= x._Val[1];
    if(u >>= 1) x._Val[0] *= x._Val[0], x._Val[1] *= x._Val[1];
  _Dcomplex p = {pow._Val[0], pow._Val[1]};
static _Complex double zpow_ui(_Complex double x, integer n) {
  _Complex double pow=1.0; unsigned long int u;
  if(n < 0) n = -n, x = 1/x;
/* pow_ii: integer x**n (Fortran I**J).  Special cases for x in {-1,0,1}
 * and non-positive n are resolved up front; note the deliberate 1/x when
 * x == 0 (division by zero for 0**negative, mirroring libF77).
 * NOTE(review): the general squaring loop and return are elided in this
 * excerpt; annotation only. */
static integer pow_ii(integer x, integer n) {
  integer pow; unsigned long int u;
  if (n == 0 || x == 1) pow = 1;
  else if (x != -1) pow = x == 0 ? 1/x : 0;
  if ((n > 0) || !(n == 0 || x == 1 || x != -1)) {
/* dmaxloc_: 1-based index of the largest element of w[s..e] (Fortran
 * MAXLOC for doubles); first occurrence wins since only strict > updates.
 * n is unused in the visible code.  NOTE(review): the return statement
 * and closing brace are elided in this excerpt. */
static integer dmaxloc_(double *w, integer s, integer e, integer *n)
    double m; integer i, mi;
    for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
	if (w[i-1]>m) mi=i ,m=w[i-1];
/* smaxloc_: single-precision counterpart of dmaxloc_ -- 1-based index of
 * the largest element of w[s..e].  NOTE(review): return statement and
 * closing brace elided in this excerpt. */
static integer smaxloc_(float *w, integer s, integer e, integer *n)
    float m; integer i, mi;
    for(m=w[s-1], mi=s, i=s+1; i<=e; i++)
	if (w[i-1]>m) mi=i ,m=w[i-1];
/* cdotc_: conjugated single-complex dot product, z = sum(conjg(x)*y),
 * with strides incx/incy (CDOTC work-alike).  Both the MSVC struct branch
 * and the C99 branch appear (stripped #ifdef); loop closings and the
 * final store to *z are elided in this excerpt.
 * NOTE(review): the struct branch accumulates the component products
 * separately (re*re into re, im*im into im), which is not the complex
 * product -- confirm against the full file. */
static inline void cdotc_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
    integer n = *n_, incx = *incx_, incy = *incy_, i;
    _Fcomplex zdotc = {0.0, 0.0};
    if (incx == 1 && incy == 1) {
	for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
	    zdotc._Val[0] += conjf(Cf(&x[i]))._Val[0] * Cf(&y[i])._Val[0];
	    zdotc._Val[1] += conjf(Cf(&x[i]))._Val[1] * Cf(&y[i])._Val[1];
	for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
	    zdotc._Val[0] += conjf(Cf(&x[i*incx]))._Val[0] * Cf(&y[i*incy])._Val[0];
	    zdotc._Val[1] += conjf(Cf(&x[i*incx]))._Val[1] * Cf(&y[i*incy])._Val[1];
    _Complex float zdotc = 0.0;
    if (incx == 1 && incy == 1) {
	for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
	    zdotc += conjf(Cf(&x[i])) * Cf(&y[i]);
	for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
	    zdotc += conjf(Cf(&x[i*incx])) * Cf(&y[i*incy]);
/* zdotc_: conjugated double-complex dot product, z = sum(dconjg(x)*y),
 * with strides incx/incy (ZDOTC work-alike).  Both the MSVC struct branch
 * and the C99 branch appear (stripped #ifdef); loop closings and the
 * final store to *z are elided in this excerpt.
 * NOTE(review): the struct branch accumulates component products
 * separately, which is not the complex product -- confirm against the
 * full file. */
static inline void zdotc_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
    integer n = *n_, incx = *incx_, incy = *incy_, i;
    _Dcomplex zdotc = {0.0, 0.0};
    if (incx == 1 && incy == 1) {
	for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
	    zdotc._Val[0] += conj(Cd(&x[i]))._Val[0] * Cd(&y[i])._Val[0];
	    zdotc._Val[1] += conj(Cd(&x[i]))._Val[1] * Cd(&y[i])._Val[1];
	for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
	    zdotc._Val[0] += conj(Cd(&x[i*incx]))._Val[0] * Cd(&y[i*incy])._Val[0];
	    zdotc._Val[1] += conj(Cd(&x[i*incx]))._Val[1] * Cd(&y[i*incy])._Val[1];
    _Complex double zdotc = 0.0;
    if (incx == 1 && incy == 1) {
	for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
	    zdotc += conj(Cd(&x[i])) * Cd(&y[i]);
	for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */
	    zdotc += conj(Cd(&x[i*incx])) * Cd(&y[i*incy]);
/* cdotu_: UNconjugated single-complex dot product, z = sum(x*y), with
 * strides incx/incy (CDOTU work-alike).  Both the MSVC struct branch and
 * the C99 branch appear (stripped #ifdef); loop closings and the final
 * store to *z are elided in this excerpt.  The inherited "dconjg" loop
 * comments were wrong for this routine (no conjugation happens) and have
 * been corrected.  NOTE(review): the struct branch accumulates component
 * products separately, which is not the complex product -- confirm
 * against the full file. */
static inline void cdotu_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) {
    integer n = *n_, incx = *incx_, incy = *incy_, i;
    _Fcomplex zdotc = {0.0, 0.0};
    if (incx == 1 && incy == 1) {
	for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
	    zdotc._Val[0] += Cf(&x[i])._Val[0] * Cf(&y[i])._Val[0];
	    zdotc._Val[1] += Cf(&x[i])._Val[1] * Cf(&y[i])._Val[1];
	for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
	    zdotc._Val[0] += Cf(&x[i*incx])._Val[0] * Cf(&y[i*incy])._Val[0];
	    zdotc._Val[1] += Cf(&x[i*incx])._Val[1] * Cf(&y[i*incy])._Val[1];
    _Complex float zdotc = 0.0;
    if (incx == 1 && incy == 1) {
	for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
	    zdotc += Cf(&x[i]) * Cf(&y[i]);
	for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
	    zdotc += Cf(&x[i*incx]) * Cf(&y[i*incy]);
/* zdotu_: UNconjugated double-complex dot product, z = sum(x*y), with
 * strides incx/incy (ZDOTU work-alike).  Both the MSVC struct branch and
 * the C99 branch appear (stripped #ifdef); loop closings and the final
 * store to *z are elided in this excerpt.  The inherited "dconjg" loop
 * comments were wrong for this routine (no conjugation happens) and have
 * been corrected.  NOTE(review): the struct branch accumulates component
 * products separately, which is not the complex product -- confirm
 * against the full file. */
static inline void zdotu_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) {
    integer n = *n_, incx = *incx_, incy = *incy_, i;
    _Dcomplex zdotc = {0.0, 0.0};
    if (incx == 1 && incy == 1) {
	for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
	    zdotc._Val[0] += Cd(&x[i])._Val[0] * Cd(&y[i])._Val[0];
	    zdotc._Val[1] += Cd(&x[i])._Val[1] * Cd(&y[i])._Val[1];
	for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
	    zdotc._Val[0] += Cd(&x[i*incx])._Val[0] * Cd(&y[i*incy])._Val[0];
	    zdotc._Val[1] += Cd(&x[i*incx])._Val[1] * Cd(&y[i*incy])._Val[1];
    _Complex double zdotc = 0.0;
    if (incx == 1 && incy == 1) {
	for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
	    zdotc += Cd(&x[i]) * Cd(&y[i]);
	for (i=0;i<n;i++) { /* zdotc = zdotc + x(i)* y(i) */
	    zdotc += Cd(&x[i*incx]) * Cd(&y[i*incy]);
505 /* -- translated by f2c (version 20000121).
506 You must link the resulting object file with the libraries:
507 -lf2c -lm (in that order)
/* Table of constant values */
static integer c__1 = 1;	/* constant 1 passed by address to Fortran-style calls (scopy_) */
517 /* > \brief \b SLARRF finds a new relatively robust representation such that at least one of the eigenvalues i
518 s relatively isolated. */
520 /* =========== DOCUMENTATION =========== */
522 /* Online html documentation available at */
523 /* http://www.netlib.org/lapack/explore-html/ */
526 /* > Download SLARRF + dependencies */
527 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/slarrf.
530 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/slarrf.
533 /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/slarrf.
541 /* SUBROUTINE SLARRF( N, D, L, LD, CLSTRT, CLEND, */
543 /* SPDIAM, CLGAPL, CLGAPR, PIVMIN, SIGMA, */
544 /* DPLUS, LPLUS, WORK, INFO ) */
546 /* INTEGER CLSTRT, CLEND, INFO, N */
547 /* REAL CLGAPL, CLGAPR, PIVMIN, SIGMA, SPDIAM */
548 /* REAL D( * ), DPLUS( * ), L( * ), LD( * ), */
549 /* $ LPLUS( * ), W( * ), WGAP( * ), WERR( * ), WORK( * ) */
552 /* > \par Purpose: */
557 /* > Given the initial representation L D L^T and its cluster of close */
558 /* > eigenvalues (in a relative measure), W( CLSTRT ), W( CLSTRT+1 ), ... */
559 /* > W( CLEND ), SLARRF finds a new relatively robust representation */
560 /* > L D L^T - SIGMA I = L(+) D(+) L(+)^T such that at least one of the */
561 /* > eigenvalues of L(+) D(+) L(+)^T is relatively isolated. */
570 /* > The order of the matrix (subblock, if the matrix split). */
575 /* > D is REAL array, dimension (N) */
576 /* > The N diagonal elements of the diagonal matrix D. */
581 /* > L is REAL array, dimension (N-1) */
582 /* > The (N-1) subdiagonal elements of the unit bidiagonal */
586 /* > \param[in] LD */
588 /* > LD is REAL array, dimension (N-1) */
589 /* > The (N-1) elements L(i)*D(i). */
592 /* > \param[in] CLSTRT */
594 /* > CLSTRT is INTEGER */
595 /* > The index of the first eigenvalue in the cluster. */
598 /* > \param[in] CLEND */
600 /* > CLEND is INTEGER */
601 /* > The index of the last eigenvalue in the cluster. */
606 /* > W is REAL array, dimension */
607 /* > dimension is >= (CLEND-CLSTRT+1) */
608 /* > The eigenvalue APPROXIMATIONS of L D L^T in ascending order. */
609 /* > W( CLSTRT ) through W( CLEND ) form the cluster of relatively */
/* > close eigenvalues. */
613 /* > \param[in,out] WGAP */
615 /* > WGAP is REAL array, dimension */
616 /* > dimension is >= (CLEND-CLSTRT+1) */
617 /* > The separation from the right neighbor eigenvalue in W. */
620 /* > \param[in] WERR */
622 /* > WERR is REAL array, dimension */
623 /* > dimension is >= (CLEND-CLSTRT+1) */
/* > WERR contains the semiwidth of the uncertainty */
625 /* > interval of the corresponding eigenvalue APPROXIMATION in W */
628 /* > \param[in] SPDIAM */
630 /* > SPDIAM is REAL */
631 /* > estimate of the spectral diameter obtained from the */
632 /* > Gerschgorin intervals */
635 /* > \param[in] CLGAPL */
637 /* > CLGAPL is REAL */
640 /* > \param[in] CLGAPR */
642 /* > CLGAPR is REAL */
643 /* > absolute gap on each end of the cluster. */
644 /* > Set by the calling routine to protect against shifts too close */
645 /* > to eigenvalues outside the cluster. */
648 /* > \param[in] PIVMIN */
650 /* > PIVMIN is REAL */
651 /* > The minimum pivot allowed in the Sturm sequence. */
654 /* > \param[out] SIGMA */
656 /* > SIGMA is REAL */
657 /* > The shift used to form L(+) D(+) L(+)^T. */
660 /* > \param[out] DPLUS */
662 /* > DPLUS is REAL array, dimension (N) */
663 /* > The N diagonal elements of the diagonal matrix D(+). */
666 /* > \param[out] LPLUS */
668 /* > LPLUS is REAL array, dimension (N-1) */
669 /* > The first (N-1) elements of LPLUS contain the subdiagonal */
670 /* > elements of the unit bidiagonal matrix L(+). */
673 /* > \param[out] WORK */
675 /* > WORK is REAL array, dimension (2*N) */
679 /* > \param[out] INFO */
681 /* > INFO is INTEGER */
682 /* > Signals processing OK (=0) or failure (=1) */
688 /* > \author Univ. of Tennessee */
689 /* > \author Univ. of California Berkeley */
690 /* > \author Univ. of Colorado Denver */
691 /* > \author NAG Ltd. */
693 /* > \date June 2016 */
695 /* > \ingroup OTHERauxiliary */
697 /* > \par Contributors: */
698 /* ================== */
700 /* > Beresford Parlett, University of California, Berkeley, USA \n */
701 /* > Jim Demmel, University of California, Berkeley, USA \n */
702 /* > Inderjit Dhillon, University of Texas, Austin, USA \n */
703 /* > Osni Marques, LBNL/NERSC, USA \n */
704 /* > Christof Voemel, University of California, Berkeley, USA */
706 /* ===================================================================== */
/* slarrf_ -- SLARRF: given the factorization L*D*L^T and a cluster of
 * close eigenvalues W(CLSTRT..CLEND), choose a shift SIGMA so that
 * L*D*L^T - SIGMA*I = L(+)*D(+)*L(+)^T is a relatively robust
 * representation with at least one relatively isolated eigenvalue.
 * See the parameter documentation block above for the full contract.
 * NOTE(review): many lines of this routine (declarations, loop/branch
 * closings and several statements) are elided in this excerpt;
 * annotations only, visible code untouched. */
/* Subroutine */ int slarrf_(integer *n, real *d__, real *l, real *ld, 
	integer *clstrt, integer *clend, real *w, real *wgap, real *werr, 
	real *spdiam, real *clgapl, real *clgapr, real *pivmin, real *sigma, 
	real *dplus, real *lplus, real *work, integer *info)
/* System generated locals */
    real r__1, r__2, r__3;
/* Local variables */
    real growthbound, fail, fact, oldp;
    real s, avgap, ldmax, rdmax;
    extern /* Subroutine */ int scopy_(integer *, real *, integer *, real *, 
    real bestshift, smlgrowth;
    extern real slamch_(char *);
    real mingap, lsigma, rdelta;
    extern logical sisnan_(real *);
    logical sawnan1, sawnan2;
    real max1, max2, rrr1, rrr2, znm2;
/* -- LAPACK auxiliary routine (version 3.7.1) -- */
/* -- LAPACK is a software package provided by Univ. of Tennessee, -- */
/* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */
/* ===================================================================== */
/* Parameter adjustments */
/* Quick return if possible */
    eps = slamch_("Precision");
/* Note that we cannot guarantee that for any of the shifts tried, */
/* the factorization has a small or even moderate element growth. */
/* There could be Ritz values at both ends of the cluster and despite */
/* backing off, there are examples where all factorizations tried */
/* (in IEEE mode, allowing zero pivots & infinities) have INFINITE */
/* element growth. */
/* For this reason, we should use PIVMIN in this subroutine so that at */
/* least the L D L^T factorization exists. It can be checked afterwards */
/* whether the element growth caused bad residuals/orthogonality. */
/* Decide whether the code should accept the best among all */
/* representations despite large element growth or signal INFO=1 */
/* Setting NOFAIL to .FALSE. for quick fix for bug 113 */
/* Compute the average gap length of the cluster */
    clwdth = (r__1 = w[*clend] - w[*clstrt], abs(r__1)) + werr[*clend] + werr[
    avgap = clwdth / (real) (*clend - *clstrt);
    mingap = f2cmin(*clgapl,*clgapr);
/* Initial values for shifts to both ends of cluster */
    r__1 = w[*clstrt], r__2 = w[*clend];
    lsigma = f2cmin(r__1,r__2) - werr[*clstrt];
    r__1 = w[*clstrt], r__2 = w[*clend];
    rsigma = f2cmax(r__1,r__2) + werr[*clend];
/* Use a small fudge to make sure that we really shift to the outside */
    lsigma -= abs(lsigma) * 2.f * eps;
    rsigma += abs(rsigma) * 2.f * eps;
/* Compute upper bounds for how much to back off the initial shifts */
    ldmax = mingap * .25f + *pivmin * 2.f;
    rdmax = mingap * .25f + *pivmin * 2.f;
    r__1 = avgap, r__2 = wgap[*clstrt];
    ldelta = f2cmax(r__1,r__2) / fact;
    r__1 = avgap, r__2 = wgap[*clend - 1];
    rdelta = f2cmax(r__1,r__2) / fact;
/* Initialize the record of the best representation found */
    fail = (real) (*n - 1) * mingap / (*spdiam * eps);
    fail2 = (real) (*n - 1) * mingap / (*spdiam * sqrt(eps));
/* while (KTRY <= KTRYMAX) */
    growthbound = *spdiam * 8.f;
/* Ensure that we do not back off too much of the initial shifts */
    ldelta = f2cmin(ldmax,ldelta);
    rdelta = f2cmin(rdmax,rdelta);
/* Compute the element growth when shifting to both ends of the cluster */
/* accept the shift if there is no element growth at one of the two ends */
/* Left-end shift: factor into DPLUS/LPLUS, clamping tiny pivots to
 * -PIVMIN and tracking the largest |DPLUS(i)| in MAX1. */
    dplus[1] = d__[1] + s;
    if (abs(dplus[1]) < *pivmin) {
	dplus[1] = -(*pivmin);
/* Need to set SAWNAN1 because refined RRR test should not be used */
    max1 = abs(dplus[1]);
    for (i__ = 1; i__ <= i__1; ++i__) {
	lplus[i__] = ld[i__] / dplus[i__];
	s = s * lplus[i__] * l[i__] - lsigma;
	dplus[i__ + 1] = d__[i__ + 1] + s;
	if ((r__1 = dplus[i__ + 1], abs(r__1)) < *pivmin) {
	    dplus[i__ + 1] = -(*pivmin);
/* Need to set SAWNAN1 because refined RRR test should not be used */
	r__2 = max1, r__3 = (r__1 = dplus[i__ + 1], abs(r__1));
	max1 = f2cmax(r__2,r__3);
    sawnan1 = sawnan1 || sisnan_(&max1);
    if (forcer || max1 <= growthbound && ! sawnan1) {
/* Right-end shift: factor into WORK(1..N) / WORK(N+1..2N-1), growth in MAX2. */
    work[1] = d__[1] + s;
    if (abs(work[1]) < *pivmin) {
	work[1] = -(*pivmin);
/* Need to set SAWNAN2 because refined RRR test should not be used */
    for (i__ = 1; i__ <= i__1; ++i__) {
	work[*n + i__] = ld[i__] / work[i__];
	s = s * work[*n + i__] * l[i__] - rsigma;
	work[i__ + 1] = d__[i__ + 1] + s;
	if ((r__1 = work[i__ + 1], abs(r__1)) < *pivmin) {
	    work[i__ + 1] = -(*pivmin);
/* Need to set SAWNAN2 because refined RRR test should not be used */
	r__2 = max2, r__3 = (r__1 = work[i__ + 1], abs(r__1));
	max2 = f2cmax(r__2,r__3);
    sawnan2 = sawnan2 || sisnan_(&max2);
    if (forcer || max2 <= growthbound && ! sawnan2) {
/* If we are at this point, both shifts led to too much element growth */
/* Record the better of the two shifts (provided it didn't lead to NaN) */
    if (sawnan1 && sawnan2) {
/* both MAX1 and MAX2 are NaN */
	if (max1 <= smlgrowth) {
	if (sawnan1 || max2 <= max1) {
	    if (max2 <= smlgrowth) {
/* If we are here, both the left and the right shift led to */
/* element growth. If the element growth is moderate, then */
/* we may still accept the representation, if it passes a */
/* refined test for RRR. This test supposes that no NaN occurred. */
/* Moreover, we use the refined RRR test only for isolated clusters. */
    if (clwdth < mingap / 128.f && f2cmin(max1,max2) < fail2 && ! sawnan1 && !
    if (tryrrr1 && dorrr1) {
	tmp = (r__1 = dplus[*n], abs(r__1));
	for (i__ = *n - 1; i__ >= 1; --i__) {
	    prod = dplus[i__ + 1] * work[*n + i__ + 1] / (dplus[i__] * 
		    work[*n + i__]) * oldp;
	    prod *= (r__1 = work[*n + i__], abs(r__1));
/* Computing 2nd power */
	    r__2 = tmp, r__3 = (r__1 = dplus[i__] * prod, abs(r__1));
	    tmp = f2cmax(r__2,r__3);
	rrr1 = tmp / (*spdiam * sqrt(znm2));
    } else if (indx == 2) {
	tmp = (r__1 = work[*n], abs(r__1));
	for (i__ = *n - 1; i__ >= 1; --i__) {
	    prod = work[i__ + 1] * lplus[i__ + 1] / (work[i__] * 
	    prod *= (r__1 = lplus[i__], abs(r__1));
/* Computing 2nd power */
	    r__2 = tmp, r__3 = (r__1 = work[i__] * prod, abs(r__1));
	    tmp = f2cmax(r__2,r__3);
	rrr2 = tmp / (*spdiam * sqrt(znm2));
/* If we are here, both shifts failed also the RRR test. */
/* Back off to the outside */
    r__1 = lsigma - ldelta, r__2 = lsigma - ldmax;
    lsigma = f2cmax(r__1,r__2);
    r__1 = rsigma + rdelta, r__2 = rsigma + rdmax;
    rsigma = f2cmin(r__1,r__2);
/* None of the representations investigated satisfied our */
/* criteria. Take the best one we found. */
    if (smlgrowth < fail || nofail) {
    } else if (shift == 2) {
/* store new L and D back into DPLUS, LPLUS */
	scopy_(n, &work[1], &c__1, &dplus[1], &c__1);
	scopy_(&i__1, &work[*n + 1], &c__1, &lplus[1], &c__1);