#ifndef TEMPLATE_LAPACK_HGEQZ_HEADER
#define TEMPLATE_LAPACK_HGEQZ_HEADER
	b, const integer *ldb, Treal *alphar, Treal *alphai, Treal *beta, Treal *q,
	const integer *ldq, Treal *z__, const integer *ldz,
    integer a_dim1, a_offset, b_dim1, b_offset, q_dim1, q_offset, z_dim1,
	    z_offset, i__1, i__2, i__3, i__4;
    Treal d__1, d__2, d__3, d__4;
    Treal ad11l, ad12l, ad21l, ad22l, ad32l, wabs, atol, btol, temp;
    Treal temp2, s1inv, c__;
    Treal s, t, v[3], scale;
    Treal tempi, tempr, s1, s2, u1, u2;
    Treal a11, a12, a21, a22, b11, b22, c12, c21;
    Treal an, bn, cl, cq, cr;
    Treal ascale, bscale, u12, w11;
    Treal cz, sl, w12, w21, w22, wi;
    Treal wr2, ad11, ad12, ad21, ad22, c11i, c22i;
    Treal c11r, c22r, u12l;
    Treal ulp, sqr, szi, szr;
#define a_ref(a_1,a_2) a[(a_2)*a_dim1 + a_1]
#define b_ref(a_1,a_2) b[(a_2)*b_dim1 + a_1]
#define q_ref(a_1,a_2) q[(a_2)*q_dim1 + a_1]
#define z___ref(a_1,a_2) z__[(a_2)*z_dim1 + a_1]
    a_offset = 1 + a_dim1 * 1;
    b_offset = 1 + b_dim1 * 1;
    q_offset = 1 + q_dim1 * 1;
    z_offset = 1 + z_dim1 * 1;
    ilschr = ilq = ilz = 0;
    lquery = *lwork == -1;
    } else if (icompq == 0) {
    } else if (icompz == 0) {
    } else if (*ilo < 1) {
    } else if (*ihi > *n || *ihi < *ilo - 1) {
    } else if (*lda < *n) {
    } else if (*ldb < *n) {
    } else if (*ldq < 1 || ( ilq && *ldq < *n ) ) {
    } else if (*ldz < 1 || ( ilz && *ldz < *n ) ) {
    } else if (*lwork < maxMACRO(1,*n) && ! lquery) {
    in = *ihi + 1 - *ilo;
    safmax = 1. / safmin;
    anorm = dlanhs_("F", &in, &a_ref(*ilo, *ilo), lda, &work[1]);
    bnorm = dlanhs_("F", &in, &b_ref(*ilo, *ilo), ldb, &work[1]);
    /* Computing MAX */
    d__1 = safmin, d__2 = ulp * anorm;
    atol = maxMACRO(d__1,d__2);
    /* Computing MAX */
    d__1 = safmin, d__2 = ulp * bnorm;
    btol = maxMACRO(d__1,d__2);
    ascale = 1. / maxMACRO(safmin,anorm);
    bscale = 1. / maxMACRO(safmin,bnorm);
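    /* Eigenvalues ihi+1..n: that part of the pencil is already upper triangular,
       so fix the sign of B(j,j) and read the eigenvalues off the diagonals. */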
    for (j = *ihi + 1; j <= i__1; ++j) {
	if (b_ref(j, j) < 0.) {
	    for (jr = 1; jr <= i__2; ++jr) {
	    for (jr = 1; jr <= i__2; ++jr) {
	alphar[j] = a_ref(j, j);
	beta[j] = b_ref(j, j);
    maxit = (*ihi - *ilo + 1) * 30;
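    /* Main QZ iteration loop: at most 30 iterations per eigenvalue position;
       ilast marks the bottom row of the block currently being worked on. */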
    for (jiter = 1; jiter <= i__1; ++jiter) {
	if ((d__1 = a_ref(ilast, ilast - 1), absMACRO(d__1)) <= atol) {
	    a_ref(ilast, ilast - 1) = 0.;
	    b_ref(ilast, ilast) = 0.;
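	/* Deflation tests for j < ilast: look for a negligible subdiagonal
	   A(j,j-1) or a negligible diagonal B(j,j) so the problem can be split. */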
	for (j = ilast - 1; j >= i__2; --j) {
	    a_ref(j, j - 1) = 0.;
	    if (tempr < 1. && tempr != 0.) {
	    if (temp * (ascale * (d__1 = a_ref(j + 1, j), absMACRO(d__1))) <=
		    temp2 * (ascale * atol)) {
	    if (ilazro || ilazr2) {
		for (jch = j; jch <= i__3; ++jch) {
		    temp = a_ref(jch, jch);
		    a_ref(jch + 1, jch) = 0.;
			    1, jch + 1), lda, &c__, &s);
			    1, jch + 1), ldb, &c__, &s);
		    a_ref(jch, jch - 1) = a_ref(jch, jch - 1) * c__;
		    if (jch + 1 >= ilast) {
			b_ref(jch + 1, jch + 1) = 0.;
		for (jch = j; jch <= i__3; ++jch) {
		    temp = b_ref(jch, jch + 1);
			    b_ref(jch, jch + 1));
		    b_ref(jch + 1, jch + 1) = 0.;
		    if (jch < ilastm - 1) {
			i__4 = ilastm - jch - 1;
				jch + 1, jch + 2), ldb, &c__, &s);
		    i__4 = ilastm - jch + 2;
			    1, jch - 1), lda, &c__, &s);
		    temp = a_ref(jch + 1, jch);
			    a_ref(jch + 1, jch));
		    a_ref(jch + 1, jch - 1) = 0.;
		    i__4 = jch + 1 - ifrstm;
			    ifrstm, jch - 1), &c__1, &c__, &s);
			    ifrstm, jch - 1), &c__1, &c__, &s);
			    - 1), &c__1, &c__, &s);
	temp = a_ref(ilast, ilast);
	a_ref(ilast, ilast - 1) = 0.;
	i__2 = ilast - ifrstm;
	i__2 = ilast - ifrstm;
	if (b_ref(ilast, ilast) < 0.) {
	    for (j = ifrstm; j <= i__2; ++j) {
	    for (j = 1; j <= i__2; ++j) {
	alphar[ilast] = a_ref(ilast, ilast);
	beta[ilast] = b_ref(ilast, ilast);
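	/* The eigenvalue at position ilast has converged: store it, then shrink
	   the active block or exit the iteration when the block is empty. */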
	if (ifrstm > ilast) {
	if (iiter / 10 * 10 == iiter) {
	    if ((Treal) maxit * safmin * (d__1 = a_ref(ilast - 1, ilast),
		eshift += a_ref(ilast - 1, ilast) / b_ref(ilast - 1, ilast - 1);
		eshift += 1. / (safmin * (Treal) maxit);
	    d__1 = safmin * 100.;
		    - 1), ldb, &d__1, &s1, &s2, &wr, &wr2, &wi);
	    d__1 = s1, d__2 = safmin * maxMACRO(d__3,d__4);
	    temp = minMACRO(ascale,1.) * (safmax * .5);
	    temp = minMACRO(bscale,1.) * (safmax * .5);
	    d__1 = scale, d__2 = temp / absMACRO(wr);
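	/* Look for two consecutive small subdiagonal elements so that the
	   implicit shift can be started below the top of the block. */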
	for (j = ilast - 1; j >= i__2; --j) {
	    if (tempr < 1. && tempr != 0.) {
	    if ((d__1 = ascale * a_ref(j + 1, j) * temp, absMACRO(d__1)) <=
		    ascale * atol) {
	temp = s1 * a_ref(istart, istart) - wr * b_ref(istart, istart);
	temp2 = s1 * a_ref(istart + 1, istart);
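	/* Single-shift sweep: create a bulge at istart and chase it down to
	   ilast with Givens rotations, applied from the left (rows, accumulated
	   in Q) and from the right (columns, accumulated in Z). */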
	for (j = istart; j <= i__2; ++j) {
	    temp = a_ref(j, j - 1);
	    a_ref(j + 1, j - 1) = 0.;
	    for (jc = j; jc <= i__3; ++jc) {
		temp = c__ * a_ref(j, jc) + s * a_ref(j + 1, jc);
		temp2 = c__ * b_ref(j, jc) + s * b_ref(j + 1, jc);
		b_ref(j, jc) = temp2;
	    for (jr = 1; jr <= i__3; ++jr) {
		temp = c__ * q_ref(jr, j) + s * q_ref(jr, j + 1);
	    temp = b_ref(j + 1, j + 1);
	    b_ref(j + 1, j) = 0.;
	    for (jr = ifrstm; jr <= i__3; ++jr) {
		temp = c__ * a_ref(jr, j + 1) + s * a_ref(jr, j);
		a_ref(jr, j + 1) = temp;
	    for (jr = ifrstm; jr <= i__3; ++jr) {
		temp = c__ * b_ref(jr, j + 1) + s * b_ref(jr, j);
		b_ref(jr, j + 1) = temp;
	    for (jr = 1; jr <= i__3; ++jr) {
	if (ifirst + 1 == ilast) {
	    /* Special case: 2-by-2 block. First standardize, i.e. rotate so
	       that the B part of the block becomes diagonal. */
		    b_ref(ilast, ilast), &b22, &b11, &sr, &cr, &sl, &cl);
	    i__2 = ilastm + 1 - ifirst;
		    ilast - 1), lda, &cl, &sl);
	    i__2 = ilast + 1 - ifrstm;
		    ilast), &c__1, &cr, &sr);
	    if (ilast < ilastm) {
		i__2 = ilastm - ilast;
			ilast + 1), lda, &cl, &sl);
	    if (ifrstm < ilast - 1) {
		i__2 = ifirst - ifrstm;
			ilast), &c__1, &cr, &sr);
	    b_ref(ilast - 1, ilast - 1) = b11;
	    b_ref(ilast - 1, ilast) = 0.;
	    b_ref(ilast, ilast - 1) = 0.;
	    b_ref(ilast, ilast) = b22;
	    for (j = ifrstm; j <= i__2; ++j) {
	    for (j = 1; j <= i__2; ++j) {
	    d__1 = safmin * 100.;
		    - 1), ldb, &d__1, &s1, &temp, &wr, &temp2, &wi);
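	    /* Complex conjugate pair: form elements of s1*A - wr*B for the
	       2-by-2 block, derive the right/left rotations (cz, szr, szi) and
	       (cq, sqr, sqi), and recover alphar/alphai/beta directly. */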
	    a11 = a_ref(ilast - 1, ilast - 1);
	    a21 = a_ref(ilast, ilast - 1);
	    a12 = a_ref(ilast - 1, ilast);
	    a22 = a_ref(ilast, ilast);
	    c11r = s1 * a11 - wr * b11;
	    c22r = s1 * a22 - wr * b22;
	    szr = -c21 * tempr / t;
	    szi = c21 * tempi / t;
	    if (s1 * an > wabs * bn) {
	    a1r = cz * a11 + szr * a12;
	    a2r = cz * a21 + szr * a22;
	    sqr = tempr * a2r + tempi * a2i;
	    sqi = tempi * a2r - tempr * a2i;
	    tempr = sqr * szr - sqi * szi;
	    tempi = sqr * szi + sqi * szr;
	    b1r = cq * cz * b11 + tempr * b22;
	    b2r = cq * cz * b22 + tempr * b11;
	    beta[ilast - 1] = b1a;
	    alphar[ilast - 1] = wr * b1a * s1inv;
	    alphai[ilast - 1] = wi * b1a * s1inv;
	    alphar[ilast] = wr * b2a * s1inv;
	    alphai[ilast] = -(wi * b2a) * s1inv;
	if (ifrstm > ilast) {
	    /* Francis double shift: build the first column v of the shift
	       polynomial in A*B^{-1}, using the 2-by-2 data at the bottom
	       (ad11,...) and at the top (ad11l,...) of the active block. */
	    ad11 = ascale * a_ref(ilast - 1, ilast - 1) / (bscale * b_ref(
		    ilast - 1, ilast - 1));
	    ad21 = ascale * a_ref(ilast, ilast - 1) / (bscale * b_ref(ilast -
		    1, ilast - 1));
	    ad12 = ascale * a_ref(ilast - 1, ilast) / (bscale * b_ref(ilast,
		    ilast));
	    ad22 = ascale * a_ref(ilast, ilast) / (bscale * b_ref(ilast,
		    ilast));
	    u12 = b_ref(ilast - 1, ilast) / b_ref(ilast, ilast);
	    ad11l = ascale * a_ref(ifirst, ifirst) / (bscale * b_ref(ifirst,
		    ifirst));
	    ad21l = ascale * a_ref(ifirst + 1, ifirst) / (bscale * b_ref(
		    ifirst, ifirst));
	    ad12l = ascale * a_ref(ifirst, ifirst + 1) / (bscale * b_ref(
		    ifirst + 1, ifirst + 1));
	    ad22l = ascale * a_ref(ifirst + 1, ifirst + 1) / (bscale * b_ref(
		    ifirst + 1, ifirst + 1));
	    ad32l = ascale * a_ref(ifirst + 2, ifirst + 1) / (bscale * b_ref(
		    ifirst + 1, ifirst + 1));
	    u12l = b_ref(ifirst, ifirst + 1) / b_ref(ifirst + 1, ifirst + 1);
	    v[0] = (ad11 - ad11l) * (ad22 - ad11l) - ad12 * ad21 + ad21 * u12
		    * ad11l + (ad12l - ad11l * u12l) * ad21l;
	    v[1] = (ad22l - ad11l - ad21l * u12l - (ad11 - ad11l) - (ad22 -
		    ad11l) + ad21 * u12) * ad21l;
	    v[2] = ad32l * ad21l;
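	    /* Double-shift sweep: Householder reflectors built from v chase the
	       resulting 3-by-3 bulge from istart down toward ilast. */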
	for (j = istart; j <= i__2; ++j) {
		v[0] = a_ref(j, j - 1);
		v[1] = a_ref(j + 1, j - 1);
		v[2] = a_ref(j + 2, j - 1);
		a_ref(j + 1, j - 1) = 0.;
		a_ref(j + 2, j - 1) = 0.;
	    for (jc = j; jc <= i__3; ++jc) {
		temp = tau * (a_ref(j, jc) + v[1] * a_ref(j + 1, jc) + v[2] *
			a_ref(j + 2, jc));
		a_ref(j + 1, jc) = a_ref(j + 1, jc) - temp * v[1];
		a_ref(j + 2, jc) = a_ref(j + 2, jc) - temp * v[2];
		temp2 = tau * (b_ref(j, jc) + v[1] * b_ref(j + 1, jc) + v[2] *
			b_ref(j + 2, jc));
		b_ref(j + 1, jc) = b_ref(j + 1, jc) - temp2 * v[1];
		b_ref(j + 2, jc) = b_ref(j + 2, jc) - temp2 * v[2];
	    for (jr = 1; jr <= i__3; ++jr) {
		temp = tau * (q_ref(jr, j) + v[1] * q_ref(jr, j + 1) + v[2] *
			q_ref(jr, j + 2));
		q_ref(jr, j + 1) = q_ref(jr, j + 1) - temp * v[1];
		q_ref(jr, j + 2) = q_ref(jr, j + 2) - temp * v[2];
	    d__3 = (d__1 = b_ref(j + 1, j + 1), absMACRO(d__1)), d__4 = (d__2 =
	    d__3 = (d__1 = b_ref(j + 2, j + 1), absMACRO(d__1)), d__4 = (d__2 =
	    if (maxMACRO(temp,temp2) < safmin) {
	    } else if (temp >= temp2) {
		w11 = b_ref(j + 1, j + 1);
		w21 = b_ref(j + 2, j + 1);
		w12 = b_ref(j + 1, j + 2);
		w22 = b_ref(j + 2, j + 2);
		u1 = b_ref(j + 1, j);
		u2 = b_ref(j + 2, j);
		w21 = b_ref(j + 1, j + 1);
		w11 = b_ref(j + 2, j + 1);
		w22 = b_ref(j + 1, j + 2);
		w12 = b_ref(j + 2, j + 2);
		u2 = b_ref(j + 1, j);
		u1 = b_ref(j + 2, j);
	    scale = (d__1 = w22 / u2, absMACRO(d__1));
	    d__2 = scale, d__3 = (d__1 = w11 / u1, absMACRO(d__1));
	    u2 = scale * u2 / w22;
	    u1 = (scale * u1 - w12 * u2) / w11;
	    tau = scale / t + 1.;
	    vs = -1. / (scale + t);
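	    /* Apply the reflector from the right to columns j..j+2 of A and B
	       (rows ifrstm..j+2), and to Z when eigenvectors are accumulated. */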
	    for (jr = ifrstm; jr <= i__3; ++jr) {
		temp = tau * (a_ref(jr, j) + v[1] * a_ref(jr, j + 1) + v[2] *
			a_ref(jr, j + 2));
		a_ref(jr, j + 1) = a_ref(jr, j + 1) - temp * v[1];
		a_ref(jr, j + 2) = a_ref(jr, j + 2) - temp * v[2];
	    for (jr = ifrstm; jr <= i__3; ++jr) {
		temp = tau * (b_ref(jr, j) + v[1] * b_ref(jr, j + 1) + v[2] *
			b_ref(jr, j + 2));
		b_ref(jr, j + 1) = b_ref(jr, j + 1) - temp * v[1];
		b_ref(jr, j + 2) = b_ref(jr, j + 2) - temp * v[2];
	    for (jr = 1; jr <= i__3; ++jr) {
			1) + v[2] * z___ref(jr, j + 2));
	    b_ref(j + 1, j) = 0.;
	    b_ref(j + 2, j) = 0.;
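	    /* Last column of the sweep (j = ilast-1): finish with Givens
	       rotations from the left and from the right instead of 3-by-3
	       reflectors. */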
	    temp = a_ref(j, j - 1);
	    a_ref(j + 1, j - 1) = 0.;
	    for (jc = j; jc <= i__2; ++jc) {
		temp = c__ * a_ref(j, jc) + s * a_ref(j + 1, jc);
		a_ref(j, jc) = temp;
		temp2 = c__ * b_ref(j, jc) + s * b_ref(j + 1, jc);
		b_ref(j, jc) = temp2;
	    for (jr = 1; jr <= i__2; ++jr) {
		temp = c__ * q_ref(jr, j) + s * q_ref(jr, j + 1);
		q_ref(jr, j) = temp;
	    temp = b_ref(j + 1, j + 1);
	    b_ref(j + 1, j) = 0.;
	    for (jr = ifrstm; jr <= i__2; ++jr) {
		temp = c__ * a_ref(jr, j + 1) + s * a_ref(jr, j);
		a_ref(jr, j + 1) = temp;
	    for (jr = ifrstm; jr <= i__2; ++jr) {
		temp = c__ * b_ref(jr, j + 1) + s * b_ref(jr, j);
		b_ref(jr, j + 1) = temp;
	    for (jr = 1; jr <= i__2; ++jr) {
    /* Successful completion: set the eigenvalues 1..ilo-1, which were never
       touched by the iteration. */
    for (j = 1; j <= i__1; ++j) {
	if (b_ref(j, j) < 0.) {
	    for (jr = 1; jr <= i__2; ++jr) {
	    for (jr = 1; jr <= i__2; ++jr) {
	alphar[j] = a_ref(j, j);
	beta[j] = b_ref(j, j);
    work[1] = (Treal) (*n);