#ifndef EIGEN_GENERAL_BLOCK_PANEL_H
#define EIGEN_GENERAL_BLOCK_PANEL_H

namespace Eigen {

namespace internal {

template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs=false, bool _ConjRhs=false>
class gebp_traits;
/** \internal \returns b if a<=0, and returns a otherwise. */
inline std::ptrdiff_t manage_caching_sizes_helper(std::ptrdiff_t a, std::ptrdiff_t b)
{
  return a<=0 ? b : a;
}
/** \internal Manages the per-process cache of the L1 and L2 cpu cache sizes. */
inline void manage_caching_sizes(Action action, std::ptrdiff_t* l1=0, std::ptrdiff_t* l2=0)
{
  static std::ptrdiff_t m_l1CacheSize = 0;
  static std::ptrdiff_t m_l2CacheSize = 0;
  if(m_l1CacheSize==0)
  {
    // query the runtime cache sizes, falling back to 8KB L1 / 1MB last-level defaults
    m_l1CacheSize = manage_caching_sizes_helper(queryL1CacheSize(), 8*1024);
    m_l2CacheSize = manage_caching_sizes_helper(queryTopLevelCacheSize(), 1*1024*1024);
  }

  if(action==SetAction)
  {
    // set the cpu cache sizes
    eigen_internal_assert(l1!=0 && l2!=0);
    m_l1CacheSize = *l1;
    m_l2CacheSize = *l2;
  }
  else if(action==GetAction)
  {
    // return the current cpu cache sizes
    eigen_internal_assert(l1!=0 && l2!=0);
    *l1 = m_l1CacheSize;
    *l2 = m_l2CacheSize;
  }
  else
  {
    eigen_internal_assert(false);
  }
}
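
// Illustrative usage sketch (not part of the kernel itself): the cached values
// are exposed through the public accessors defined at the end of this file, e.g.
//
//   std::ptrdiff_t l1 = Eigen::l1CacheSize();        // current L1 size in bytes
//   Eigen::setCpuCacheSizes(32*1024, 2*1024*1024);   // override: 32KB L1, 2MB L2
//
// Both calls route through manage_caching_sizes() above.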
/** \internal Computes the blocking sizes such that the kc x nr unpacked rhs
  * panel stays within the L1 cache while the mc x kc lhs block fits within
  * the L2 cache.
  */
template<typename LhsScalar, typename RhsScalar, int KcFactor, typename SizeType>
void computeProductBlockingSizes(SizeType& k, SizeType& m, SizeType& n)
{
  EIGEN_UNUSED_VARIABLE(n);
  std::ptrdiff_t l1, l2;

  typedef gebp_traits<LhsScalar,RhsScalar> Traits;
  enum {
    kdiv = KcFactor * 2 * Traits::nr
         * Traits::RhsProgress * sizeof(RhsScalar),
    mr = gebp_traits<LhsScalar,RhsScalar>::mr,
    mr_mask = (0xffffffff/mr)*mr
  };

  manage_caching_sizes(GetAction, &l1, &l2);
  k = std::min<SizeType>(k, l1/kdiv);
  SizeType _m = k>0 ? l2/(4 * sizeof(LhsScalar) * k) : 0;
  if(_m<m) m = _m & mr_mask;
}
template<typename LhsScalar, typename RhsScalar, typename SizeType>
inline void computeProductBlockingSizes(SizeType& k, SizeType& m, SizeType& n)
{
  computeProductBlockingSizes<LhsScalar,RhsScalar,1>(k, m, n);
}
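
// Worked example of the heuristic above, assuming for illustration a 32KB L1
// cache and a float product on a target with 4-wide packets and 8 vector
// registers (so Traits::nr==2, Traits::RhsProgress==4, Traits::mr==8):
//
//   kdiv = 1 * 2*2 * 4 * sizeof(float) = 64
//   k    = min(k, 32768/64)            = min(k, 512)
//
// i.e. kc is capped so that the unpacked kc x nr rhs panel stays in L1, and
// mc is then derived from kc so that the mc x kc lhs block fits within L2.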
#ifdef EIGEN_HAS_FUSE_CJMADD
  #define MADD(CJ,A,B,C,T)  C = CJ.pmadd(A,B,C);
#else

  template<typename CJ, typename A, typename B, typename C, typename T>
  struct gebp_madd_selector
  {
    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, A& a, B& b, C& c, T& /*t*/)
    {
      c = cj.pmadd(a,b,c);
    }
  };
  template<typename CJ, typename T>
  struct gebp_madd_selector<CJ,T,T,T,T>
  {
    EIGEN_ALWAYS_INLINE static void run(const CJ& cj, T& a, T& b, T& c, T& t)
    {
      t = b; t = cj.pmul(a,t); c = padd(c,t);
    }
  };
  template<typename CJ, typename A, typename B, typename C, typename T>
  EIGEN_STRONG_INLINE void gebp_madd(const CJ& cj, A& a, B& b, C& c, T& t)
  {
    gebp_madd_selector<CJ,A,B,C,T>::run(cj,a,b,c,t);
  }

  #define MADD(CJ,A,B,C,T)  gebp_madd(CJ,A,B,C,T);
#endif
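
// In both configurations MADD(CJ,A,B,C,T) computes C += A*B (with any
// conjugation applied through CJ). The explicit temporary T in the non-fused
// path gives the compiler a spare register for the product, avoiding a
// read-modify-write dependency on the accumulator C.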
/* The default gebp traits: define the packet types, the register-level
 * blocking sizes (mr x nr), and the elementary operations (initAcc, loadLhs,
 * loadRhs, madd, acc) used by the gebp kernel below. */
template<typename _LhsScalar, typename _RhsScalar, bool _ConjLhs, bool _ConjRhs>
class gebp_traits
{
public:
  typedef _LhsScalar LhsScalar;
  typedef _RhsScalar RhsScalar;
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = _ConjRhs,
    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,

    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,

    // register block size along the N direction (must be either 2 or 4)
    nr = NumberOfRegisters/4,

    // register block size along the M direction (currently, this one cannot be modified)
    mr = 2 * LhsPacketSize,

    WorkSpaceFactor = nr * RhsPacketSize,

    LhsProgress = LhsPacketSize,
    RhsProgress = RhsPacketSize
  };

  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;
  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }

  EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b)
  {
    for(DenseIndex k=0; k<n; k++)
      pstore1<RhsPacket>(&b[k*RhsPacketSize], rhs[k]);
  }

  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pload<RhsPacket>(b);
  }

  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = pload<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, AccPacket& tmp) const
  {
    tmp = b; tmp = pmul(a,tmp); c = padd(c,tmp);
  }

  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = pmadd(c,alpha,r);
  }
};
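
// A minimal sketch of how the gebp kernel below drives these traits for one
// register block, in the generic real case (blA/blB/r0/alphav stand in for
// the pointers and packets of the actual loops):
//
//   AccPacket C0;                 traits.initAcc(C0);          // C0 = 0
//   LhsPacket A0;                 traits.loadLhs(blA, A0);     // packet of packed lhs
//   RhsPacket B_0, T0;            traits.loadRhs(blB, B_0);    // broadcasted rhs coeff
//   traits.madd(A0, B_0, C0, T0);                              // C0 += A0 * B_0
//   ResPacket R0 = ploadu<ResPacket>(r0);
//   traits.acc(C0, alphav, R0);                                // R0 += alpha * C0
//   pstoreu(r0, R0);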
/* Specialization for lhs = complex, rhs = real. */
template<typename RealScalar, bool _ConjLhs>
class gebp_traits<std::complex<RealScalar>, RealScalar, _ConjLhs, false>
{
public:
  typedef std::complex<RealScalar> LhsScalar;
  typedef RealScalar RhsScalar;
  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = false,
    Vectorizable = packet_traits<LhsScalar>::Vectorizable && packet_traits<RhsScalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,

    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
    nr = NumberOfRegisters/4,
    mr = 2 * LhsPacketSize,
    WorkSpaceFactor = nr*RhsPacketSize,

    LhsProgress = LhsPacketSize,
    RhsProgress = RhsPacketSize
  };

  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;
  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }

  EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b)
  {
    for(DenseIndex k=0; k<n; k++)
      pstore1<RhsPacket>(&b[k*RhsPacketSize], rhs[k]);
  }

  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pload<RhsPacket>(b);
  }

  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = pload<LhsPacket>(a);
  }

  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
  {
    madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
  {
    tmp = b; tmp = pmul(a.v,tmp); c.v = padd(c.v,tmp);
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
  {
    c += a * b;
  }

  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = cj.pmadd(c,alpha,r);
  }

protected:
  conj_helper<ResPacket,ResPacket,ConjLhs,false> cj;
};
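
// Note how madd() dispatches at compile time, via
// conditional<Vectorizable,true_type,false_type>, between the packet path
// (which multiplies the raw real packet a.v inside the complex lhs packet by
// the broadcasted real rhs) and the plain scalar fallback.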
/* Specialization for lhs = complex, rhs = complex. */
template<typename RealScalar, bool _ConjLhs, bool _ConjRhs>
class gebp_traits<std::complex<RealScalar>, std::complex<RealScalar>, _ConjLhs, _ConjRhs >
{
public:
  typedef std::complex<RealScalar>  Scalar;
  typedef std::complex<RealScalar>  LhsScalar;
  typedef std::complex<RealScalar>  RhsScalar;
  typedef std::complex<RealScalar>  ResScalar;

  enum {
    ConjLhs = _ConjLhs,
    ConjRhs = _ConjRhs,
    Vectorizable = packet_traits<RealScalar>::Vectorizable
                && packet_traits<Scalar>::Vectorizable,
    RealPacketSize  = Vectorizable ? packet_traits<RealScalar>::size : 1,
    ResPacketSize   = Vectorizable ? packet_traits<ResScalar>::size : 1,

    nr = 2,
    mr = 2 * ResPacketSize,
    WorkSpaceFactor = Vectorizable ? 2*nr*RealPacketSize : nr,

    LhsProgress = ResPacketSize,
    RhsProgress = Vectorizable ? 2*ResPacketSize : 1
  };

  typedef typename packet_traits<RealScalar>::type RealPacket;
  typedef typename packet_traits<Scalar>::type     ScalarPacket;

  // the real and imaginary half-products are accumulated in two separate real packets
  struct DoublePacket
  {
    RealPacket first;
    RealPacket second;
  };

  typedef typename conditional<Vectorizable,RealPacket,  Scalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,DoublePacket,Scalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,ScalarPacket,Scalar>::type ResPacket;
  typedef typename conditional<Vectorizable,DoublePacket,Scalar>::type AccPacket;
  EIGEN_STRONG_INLINE void initAcc(Scalar& p) { p = Scalar(0); }

  EIGEN_STRONG_INLINE void initAcc(DoublePacket& p)
  {
    p.first  = pset1<RealPacket>(RealScalar(0));
    p.second = pset1<RealPacket>(RealScalar(0));
  }

  /* Unpack the rhs coefficients such that each complex coefficient is spread
   * into two packets: one holding its real part, one its imaginary part. */
  EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const Scalar* rhs, Scalar* b)
  {
    for(DenseIndex k=0; k<n; k++)
    {
      if(Vectorizable)
      {
        pstore1<RealPacket>((RealScalar*)&b[k*ResPacketSize*2+0],             real(rhs[k]));
        pstore1<RealPacket>((RealScalar*)&b[k*ResPacketSize*2+ResPacketSize], imag(rhs[k]));
      }
      else
        b[k] = rhs[k];
    }
  }
  // nothing special here
  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, ResPacket& dest) const { dest = *b; }

  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, DoublePacket& dest) const
  {
    dest.first  = pload<RealPacket>((const RealScalar*)b);
    dest.second = pload<RealPacket>((const RealScalar*)(b+ResPacketSize));
  }

  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    dest = pload<LhsPacket>((const typename unpacket_traits<LhsPacket>::type*)(a));
  }
  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, DoublePacket& c, RhsPacket& /*tmp*/) const
  {
    c.first  = padd(pmul(a,b.first),  c.first);
    c.second = padd(pmul(a,b.second), c.second);
  }

  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, ResPacket& c, RhsPacket& /*tmp*/) const
  {
    c = cj.pmadd(a,b,c);
  }
  EIGEN_STRONG_INLINE void acc(const Scalar& c, const Scalar& alpha, Scalar& r) const { r += alpha * c; }

  EIGEN_STRONG_INLINE void acc(const DoublePacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    // assemble c
    ResPacket tmp;
    if((!ConjLhs)&&(!ConjRhs))
    {
      tmp = pcplxflip(pconj(ResPacket(c.second)));
      tmp = padd(ResPacket(c.first),tmp);
    }
    else if((!ConjLhs)&&(ConjRhs))
    {
      tmp = pconj(pcplxflip(ResPacket(c.second)));
      tmp = padd(ResPacket(c.first),tmp);
    }
    else if((ConjLhs)&&(!ConjRhs))
    {
      tmp = pcplxflip(ResPacket(c.second));
      tmp = padd(pconj(ResPacket(c.first)),tmp);
    }
    else if((ConjLhs)&&(ConjRhs))
    {
      tmp = pcplxflip(ResPacket(c.second));
      tmp = psub(pconj(ResPacket(c.first)),tmp);
    }

    r = pmadd(tmp,alpha,r);
  }

protected:
  conj_helper<LhsScalar,RhsScalar,ConjLhs,ConjRhs> cj;
};
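
// The DoublePacket scheme evaluates, per coefficient,
//   (ar + i*ai)*(br + i*bi) = (ar*br - ai*bi) + i*(ar*bi + ai*br)
// without any shuffle in the inner loop: unpackRhs() broadcasts br into
// b.first and bi into b.second, madd() accumulates a*br in c.first and a*bi
// in c.second, and only acc() recombines the two halves, using pcplxflip to
// swap the real/imaginary lanes and pconj to negate the imaginary lanes as
// required by the conjugation flags.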
/* Specialization for lhs = real, rhs = complex. */
template<typename RealScalar, bool _ConjRhs>
class gebp_traits<RealScalar, std::complex<RealScalar>, false, _ConjRhs >
{
public:
  typedef std::complex<RealScalar>  Scalar;
  typedef RealScalar  LhsScalar;
  typedef Scalar      RhsScalar;
  typedef Scalar      ResScalar;

  enum {
    ConjLhs = false,
    ConjRhs = _ConjRhs,
    Vectorizable = packet_traits<RealScalar>::Vectorizable
                && packet_traits<Scalar>::Vectorizable,
    LhsPacketSize = Vectorizable ? packet_traits<LhsScalar>::size : 1,
    RhsPacketSize = Vectorizable ? packet_traits<RhsScalar>::size : 1,
    ResPacketSize = Vectorizable ? packet_traits<ResScalar>::size : 1,

    NumberOfRegisters = EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS,
    nr = 4,
    mr = 2*ResPacketSize,
    WorkSpaceFactor = nr*RhsPacketSize,

    LhsProgress = ResPacketSize,
    RhsProgress = ResPacketSize
  };

  typedef typename packet_traits<LhsScalar>::type  _LhsPacket;
  typedef typename packet_traits<RhsScalar>::type  _RhsPacket;
  typedef typename packet_traits<ResScalar>::type  _ResPacket;

  typedef typename conditional<Vectorizable,_LhsPacket,LhsScalar>::type LhsPacket;
  typedef typename conditional<Vectorizable,_RhsPacket,RhsScalar>::type RhsPacket;
  typedef typename conditional<Vectorizable,_ResPacket,ResScalar>::type ResPacket;

  typedef ResPacket AccPacket;
  EIGEN_STRONG_INLINE void initAcc(AccPacket& p)
  {
    p = pset1<ResPacket>(ResScalar(0));
  }

  EIGEN_STRONG_INLINE void unpackRhs(DenseIndex n, const RhsScalar* rhs, RhsScalar* b)
  {
    for(DenseIndex k=0; k<n; k++)
      pstore1<RhsPacket>(&b[k*RhsPacketSize], rhs[k]);
  }

  EIGEN_STRONG_INLINE void loadRhs(const RhsScalar* b, RhsPacket& dest) const
  {
    dest = pload<RhsPacket>(b);
  }

  EIGEN_STRONG_INLINE void loadLhs(const LhsScalar* a, LhsPacket& dest) const
  {
    // the lhs is real: duplicate every coefficient so that it faces both the
    // real and imaginary lanes of the complex rhs packet
    dest = ploaddup<LhsPacket>(a);
  }
  EIGEN_STRONG_INLINE void madd(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp) const
  {
    madd_impl(a, b, c, tmp, typename conditional<Vectorizable,true_type,false_type>::type());
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsPacket& a, const RhsPacket& b, AccPacket& c, RhsPacket& tmp, const true_type&) const
  {
    tmp = b; tmp.v = pmul(a,tmp.v); c = padd(c,tmp);
  }

  EIGEN_STRONG_INLINE void madd_impl(const LhsScalar& a, const RhsScalar& b, ResScalar& c, RhsScalar& /*tmp*/, const false_type&) const
  {
    c += a * b;
  }

  EIGEN_STRONG_INLINE void acc(const AccPacket& c, const ResPacket& alpha, ResPacket& r) const
  {
    r = cj.pmadd(alpha,c,r);
  }

protected:
  conj_helper<ResPacket,ResPacket,false,ConjRhs> cj;
};
/* optimized GEneral packed Block * packed Panel product kernel */
template<typename LhsScalar, typename RhsScalar, typename Index, int mr, int nr, bool ConjugateLhs, bool ConjugateRhs>
struct gebp_kernel
{
  typedef gebp_traits<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> Traits;
  typedef typename Traits::ResScalar ResScalar;
  typedef typename Traits::LhsPacket LhsPacket;
  typedef typename Traits::RhsPacket RhsPacket;
  typedef typename Traits::ResPacket ResPacket;
  typedef typename Traits::AccPacket AccPacket;

  enum {
    Vectorizable  = Traits::Vectorizable,
    LhsProgress   = Traits::LhsProgress,
    RhsProgress   = Traits::RhsProgress,
    ResPacketSize = Traits::ResPacketSize
  };

  EIGEN_DONT_INLINE EIGEN_FLATTEN_ATTRIB
  void operator()(ResScalar* res, Index resStride, const LhsScalar* blockA, const RhsScalar* blockB,
                  Index rows, Index depth, Index cols, ResScalar alpha,
                  Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0, RhsScalar* unpackedB = 0)
  {
    Traits traits;

    if(strideA==-1) strideA = depth;
    if(strideB==-1) strideB = depth;
    conj_helper<LhsScalar,RhsScalar,ConjugateLhs,ConjugateRhs> cj;
    Index packet_cols = (cols/nr) * nr;
    const Index peeled_mc  = (rows/mr)*mr;
    const Index peeled_mc2 = peeled_mc + (rows-peeled_mc >= LhsProgress ? LhsProgress : 0);
    const Index peeled_kc  = (depth/4)*4;

    if(unpackedB==0)
      unpackedB = const_cast<RhsScalar*>(blockB - strideB * nr * RhsProgress);
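    // When no explicit buffer is passed, the line above assumes that a
    // workspace of strideB*nr*RhsProgress coefficients immediately precedes
    // blockB in memory; callers are expected to reserve that space as part of
    // the rhs block allocation.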
    // loops on each micro vertical panel of rhs (depth x nr)
    for(Index j2=0; j2<packet_cols; j2+=nr)
    {
      // unpack B
      traits.unpackRhs(depth*nr, &blockB[j2*strideB+offsetB*nr], unpackedB);
      // loops on each micro horizontal panel of lhs (mr x depth)
      for(Index i=0; i<peeled_mc; i+=mr)
      {
        const LhsScalar* blA = &blockA[i*strideA+offsetA*mr];

        // gets res block as registers
        AccPacket C0, C1, C2, C3, C4, C5, C6, C7;
        traits.initAcc(C0);
        traits.initAcc(C1);
        if(nr==4) traits.initAcc(C2);
        if(nr==4) traits.initAcc(C3);
        traits.initAcc(C4);
        traits.initAcc(C5);
        if(nr==4) traits.initAcc(C6);
        if(nr==4) traits.initAcc(C7);

        ResScalar* r0 = &res[(j2+0)*resStride + i];
        ResScalar* r1 = r0 + resStride;
        ResScalar* r2 = r1 + resStride;
        ResScalar* r3 = r2 + resStride;
        // performs "inner" products, peeled by chunks of 4 along the depth
        const RhsScalar* blB = unpackedB;
        for(Index k=0; k<peeled_kc; k+=4)
        {
          if(nr==2)
          {
            LhsPacket A0, A1;
            RhsPacket B_0;
            RhsPacket T0;

            EIGEN_ASM_COMMENT("mybegin2");
            traits.loadLhs(&blA[0*LhsProgress], A0);
            traits.loadLhs(&blA[1*LhsProgress], A1);
            traits.loadRhs(&blB[0*RhsProgress], B_0);
            traits.madd(A0,B_0,C0,T0);
            traits.madd(A1,B_0,C4,B_0);
            traits.loadRhs(&blB[1*RhsProgress], B_0);
            traits.madd(A0,B_0,C1,T0);
            traits.madd(A1,B_0,C5,B_0);

            traits.loadLhs(&blA[2*LhsProgress], A0);
            traits.loadLhs(&blA[3*LhsProgress], A1);
            traits.loadRhs(&blB[2*RhsProgress], B_0);
            traits.madd(A0,B_0,C0,T0);
            traits.madd(A1,B_0,C4,B_0);
            traits.loadRhs(&blB[3*RhsProgress], B_0);
            traits.madd(A0,B_0,C1,T0);
            traits.madd(A1,B_0,C5,B_0);

            traits.loadLhs(&blA[4*LhsProgress], A0);
            traits.loadLhs(&blA[5*LhsProgress], A1);
            traits.loadRhs(&blB[4*RhsProgress], B_0);
            traits.madd(A0,B_0,C0,T0);
            traits.madd(A1,B_0,C4,B_0);
            traits.loadRhs(&blB[5*RhsProgress], B_0);
            traits.madd(A0,B_0,C1,T0);
            traits.madd(A1,B_0,C5,B_0);

            traits.loadLhs(&blA[6*LhsProgress], A0);
            traits.loadLhs(&blA[7*LhsProgress], A1);
            traits.loadRhs(&blB[6*RhsProgress], B_0);
            traits.madd(A0,B_0,C0,T0);
            traits.madd(A1,B_0,C4,B_0);
            traits.loadRhs(&blB[7*RhsProgress], B_0);
            traits.madd(A0,B_0,C1,T0);
            traits.madd(A1,B_0,C5,B_0);
            EIGEN_ASM_COMMENT("myend");
          }
          else
          {
            EIGEN_ASM_COMMENT("mybegin4");
            LhsPacket A0, A1;
            RhsPacket B_0, B1, B2, B3;
            RhsPacket T0;

            traits.loadLhs(&blA[0*LhsProgress], A0);
            traits.loadLhs(&blA[1*LhsProgress], A1);
            traits.loadRhs(&blB[0*RhsProgress], B_0);
            traits.loadRhs(&blB[1*RhsProgress], B1);

            traits.madd(A0,B_0,C0,T0);
            traits.loadRhs(&blB[2*RhsProgress], B2);
            traits.madd(A1,B_0,C4,B_0);
            traits.loadRhs(&blB[3*RhsProgress], B3);
            traits.loadRhs(&blB[4*RhsProgress], B_0);
            traits.madd(A0,B1,C1,T0);
            traits.madd(A1,B1,C5,B1);
            traits.loadRhs(&blB[5*RhsProgress], B1);
            traits.madd(A0,B2,C2,T0);
            traits.madd(A1,B2,C6,B2);
            traits.loadRhs(&blB[6*RhsProgress], B2);
            traits.madd(A0,B3,C3,T0);
            traits.loadLhs(&blA[2*LhsProgress], A0);
            traits.madd(A1,B3,C7,B3);
            traits.loadLhs(&blA[3*LhsProgress], A1);
            traits.loadRhs(&blB[7*RhsProgress], B3);
            traits.madd(A0,B_0,C0,T0);
            traits.madd(A1,B_0,C4,B_0);
            traits.loadRhs(&blB[8*RhsProgress], B_0);
            traits.madd(A0,B1,C1,T0);
            traits.madd(A1,B1,C5,B1);
            traits.loadRhs(&blB[9*RhsProgress], B1);
            traits.madd(A0,B2,C2,T0);
            traits.madd(A1,B2,C6,B2);
            traits.loadRhs(&blB[10*RhsProgress], B2);
            traits.madd(A0,B3,C3,T0);
            traits.loadLhs(&blA[4*LhsProgress], A0);
            traits.madd(A1,B3,C7,B3);
            traits.loadLhs(&blA[5*LhsProgress], A1);
            traits.loadRhs(&blB[11*RhsProgress], B3);

            traits.madd(A0,B_0,C0,T0);
            traits.madd(A1,B_0,C4,B_0);
            traits.loadRhs(&blB[12*RhsProgress], B_0);
            traits.madd(A0,B1,C1,T0);
            traits.madd(A1,B1,C5,B1);
            traits.loadRhs(&blB[13*RhsProgress], B1);
            traits.madd(A0,B2,C2,T0);
            traits.madd(A1,B2,C6,B2);
            traits.loadRhs(&blB[14*RhsProgress], B2);
            traits.madd(A0,B3,C3,T0);
            traits.loadLhs(&blA[6*LhsProgress], A0);
            traits.madd(A1,B3,C7,B3);
            traits.loadLhs(&blA[7*LhsProgress], A1);
            traits.loadRhs(&blB[15*RhsProgress], B3);
            traits.madd(A0,B_0,C0,T0);
            traits.madd(A1,B_0,C4,B_0);
            traits.madd(A0,B1,C1,T0);
            traits.madd(A1,B1,C5,B1);
            traits.madd(A0,B2,C2,T0);
            traits.madd(A1,B2,C6,B2);
            traits.madd(A0,B3,C3,T0);
            traits.madd(A1,B3,C7,B3);
          }

          blB += 4*nr*RhsProgress;
          blA += 4*mr;
        }
        // process the remainder of the peeled loop
        for(Index k=peeled_kc; k<depth; k++)
        {
          if(nr==2)
          {
            LhsPacket A0, A1;
            RhsPacket B_0;
            RhsPacket T0;

            traits.loadLhs(&blA[0*LhsProgress], A0);
            traits.loadLhs(&blA[1*LhsProgress], A1);
            traits.loadRhs(&blB[0*RhsProgress], B_0);
            traits.madd(A0,B_0,C0,T0);
            traits.madd(A1,B_0,C4,B_0);
            traits.loadRhs(&blB[1*RhsProgress], B_0);
            traits.madd(A0,B_0,C1,T0);
            traits.madd(A1,B_0,C5,B_0);
          }
          else
          {
            LhsPacket A0, A1;
            RhsPacket B_0, B1, B2, B3;
            RhsPacket T0;

            traits.loadLhs(&blA[0*LhsProgress], A0);
            traits.loadLhs(&blA[1*LhsProgress], A1);
            traits.loadRhs(&blB[0*RhsProgress], B_0);
            traits.loadRhs(&blB[1*RhsProgress], B1);

            traits.madd(A0,B_0,C0,T0);
            traits.loadRhs(&blB[2*RhsProgress], B2);
            traits.madd(A1,B_0,C4,B_0);
            traits.loadRhs(&blB[3*RhsProgress], B3);
            traits.madd(A0,B1,C1,T0);
            traits.madd(A1,B1,C5,B1);
            traits.madd(A0,B2,C2,T0);
            traits.madd(A1,B2,C6,B2);
            traits.madd(A0,B3,C3,T0);
            traits.madd(A1,B3,C7,B3);
          }

          blB += nr*RhsProgress;
          blA += mr;
        }
        if(nr==4)
        {
          ResPacket R0, R1, R2, R3, R4, R5, R6;
          ResPacket alphav = pset1<ResPacket>(alpha);

          R0 = ploadu<ResPacket>(r0);
          R1 = ploadu<ResPacket>(r1);
          R2 = ploadu<ResPacket>(r2);
          R3 = ploadu<ResPacket>(r3);
          R4 = ploadu<ResPacket>(r0 + ResPacketSize);
          R5 = ploadu<ResPacket>(r1 + ResPacketSize);
          R6 = ploadu<ResPacket>(r2 + ResPacketSize);
          traits.acc(C0, alphav, R0);
          pstoreu(r0, R0);
          R0 = ploadu<ResPacket>(r3 + ResPacketSize);

          traits.acc(C1, alphav, R1);
          traits.acc(C2, alphav, R2);
          traits.acc(C3, alphav, R3);
          traits.acc(C4, alphav, R4);
          traits.acc(C5, alphav, R5);
          traits.acc(C6, alphav, R6);
          traits.acc(C7, alphav, R0);

          pstoreu(r1, R1);
          pstoreu(r2, R2);
          pstoreu(r3, R3);
          pstoreu(r0 + ResPacketSize, R4);
          pstoreu(r1 + ResPacketSize, R5);
          pstoreu(r2 + ResPacketSize, R6);
          pstoreu(r3 + ResPacketSize, R0);
        }
        else
        {
          ResPacket R0, R1, R4;
          ResPacket alphav = pset1<ResPacket>(alpha);

          R0 = ploadu<ResPacket>(r0);
          R1 = ploadu<ResPacket>(r1);
          R4 = ploadu<ResPacket>(r0 + ResPacketSize);
          traits.acc(C0, alphav, R0);
          pstoreu(r0, R0);
          R0 = ploadu<ResPacket>(r1 + ResPacketSize);
          traits.acc(C1, alphav, R1);
          traits.acc(C4, alphav, R4);
          traits.acc(C5, alphav, R0);
          pstoreu(r1, R1);
          pstoreu(r0 + ResPacketSize, R4);
          pstoreu(r1 + ResPacketSize, R0);
        }
      }
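
      // Register budget of the peeled loops above (nr==4 case): 8 accumulators
      // (C0..C7, i.e. an mr x nr block of 2 x 4 packets), two lhs packets
      // (A0,A1), up to four rhs packets (B_0..B3) and one temporary. This
      // accounting is what ties nr to NumberOfRegisters/4 in gebp_traits.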
      if(rows-peeled_mc>=LhsProgress)
      {
        Index i = peeled_mc;
        const LhsScalar* blA = &blockA[i*strideA+offsetA*LhsProgress];

        // gets res block as registers
        AccPacket C0, C1, C2, C3;
        traits.initAcc(C0);
        traits.initAcc(C1);
        if(nr==4) traits.initAcc(C2);
        if(nr==4) traits.initAcc(C3);
        const RhsScalar* blB = unpackedB;
        for(Index k=0; k<peeled_kc; k+=4)
        {
          if(nr==2)
          {
            LhsPacket A0;
            RhsPacket B_0, B1;

            traits.loadLhs(&blA[0*LhsProgress], A0);
            traits.loadRhs(&blB[0*RhsProgress], B_0);
            traits.loadRhs(&blB[1*RhsProgress], B1);
            traits.madd(A0,B_0,C0,B_0);
            traits.loadRhs(&blB[2*RhsProgress], B_0);
            traits.madd(A0,B1,C1,B1);
            traits.loadLhs(&blA[1*LhsProgress], A0);
            traits.loadRhs(&blB[3*RhsProgress], B1);
            traits.madd(A0,B_0,C0,B_0);
            traits.loadRhs(&blB[4*RhsProgress], B_0);
            traits.madd(A0,B1,C1,B1);
            traits.loadLhs(&blA[2*LhsProgress], A0);
            traits.loadRhs(&blB[5*RhsProgress], B1);
            traits.madd(A0,B_0,C0,B_0);
            traits.loadRhs(&blB[6*RhsProgress], B_0);
            traits.madd(A0,B1,C1,B1);
            traits.loadLhs(&blA[3*LhsProgress], A0);
            traits.loadRhs(&blB[7*RhsProgress], B1);
            traits.madd(A0,B_0,C0,B_0);
            traits.madd(A0,B1,C1,B1);
          }
          else
          {
            LhsPacket A0;
            RhsPacket B_0, B1, B2, B3;

            traits.loadLhs(&blA[0*LhsProgress], A0);
            traits.loadRhs(&blB[0*RhsProgress], B_0);
            traits.loadRhs(&blB[1*RhsProgress], B1);

            traits.madd(A0,B_0,C0,B_0);
            traits.loadRhs(&blB[2*RhsProgress], B2);
            traits.loadRhs(&blB[3*RhsProgress], B3);
            traits.loadRhs(&blB[4*RhsProgress], B_0);
            traits.madd(A0,B1,C1,B1);
            traits.loadRhs(&blB[5*RhsProgress], B1);
            traits.madd(A0,B2,C2,B2);
            traits.loadRhs(&blB[6*RhsProgress], B2);
            traits.madd(A0,B3,C3,B3);
            traits.loadLhs(&blA[1*LhsProgress], A0);
            traits.loadRhs(&blB[7*RhsProgress], B3);
            traits.madd(A0,B_0,C0,B_0);
            traits.loadRhs(&blB[8*RhsProgress], B_0);
            traits.madd(A0,B1,C1,B1);
            traits.loadRhs(&blB[9*RhsProgress], B1);
            traits.madd(A0,B2,C2,B2);
            traits.loadRhs(&blB[10*RhsProgress], B2);
            traits.madd(A0,B3,C3,B3);
            traits.loadLhs(&blA[2*LhsProgress], A0);
            traits.loadRhs(&blB[11*RhsProgress], B3);

            traits.madd(A0,B_0,C0,B_0);
            traits.loadRhs(&blB[12*RhsProgress], B_0);
            traits.madd(A0,B1,C1,B1);
            traits.loadRhs(&blB[13*RhsProgress], B1);
            traits.madd(A0,B2,C2,B2);
            traits.loadRhs(&blB[14*RhsProgress], B2);
            traits.madd(A0,B3,C3,B3);

            traits.loadLhs(&blA[3*LhsProgress], A0);
            traits.loadRhs(&blB[15*RhsProgress], B3);
            traits.madd(A0,B_0,C0,B_0);
            traits.madd(A0,B1,C1,B1);
            traits.madd(A0,B2,C2,B2);
            traits.madd(A0,B3,C3,B3);
          }

          blB += nr*4*RhsProgress;
          blA += 4*LhsProgress;
        }
        // process the remainder of the peeled loop
        for(Index k=peeled_kc; k<depth; k++)
        {
          if(nr==2)
          {
            LhsPacket A0;
            RhsPacket B_0, B1;

            traits.loadLhs(&blA[0*LhsProgress], A0);
            traits.loadRhs(&blB[0*RhsProgress], B_0);
            traits.loadRhs(&blB[1*RhsProgress], B1);
            traits.madd(A0,B_0,C0,B_0);
            traits.madd(A0,B1,C1,B1);
          }
          else
          {
            LhsPacket A0;
            RhsPacket B_0, B1, B2, B3;

            traits.loadLhs(&blA[0*LhsProgress], A0);
            traits.loadRhs(&blB[0*RhsProgress], B_0);
            traits.loadRhs(&blB[1*RhsProgress], B1);
            traits.loadRhs(&blB[2*RhsProgress], B2);
            traits.loadRhs(&blB[3*RhsProgress], B3);

            traits.madd(A0,B_0,C0,B_0);
            traits.madd(A0,B1,C1,B1);
            traits.madd(A0,B2,C2,B2);
            traits.madd(A0,B3,C3,B3);
          }

          blB += nr*RhsProgress;
          blA += LhsProgress;
        }
        ResPacket R0, R1, R2, R3;
        ResPacket alphav = pset1<ResPacket>(alpha);

        ResScalar* r0 = &res[(j2+0)*resStride + i];
        ResScalar* r1 = r0 + resStride;
        ResScalar* r2 = r1 + resStride;
        ResScalar* r3 = r2 + resStride;

        R0 = ploadu<ResPacket>(r0);
        R1 = ploadu<ResPacket>(r1);
        if(nr==4) R2 = ploadu<ResPacket>(r2);
        if(nr==4) R3 = ploadu<ResPacket>(r3);

        traits.acc(C0, alphav, R0);
        traits.acc(C1, alphav, R1);
        if(nr==4) traits.acc(C2, alphav, R2);
        if(nr==4) traits.acc(C3, alphav, R3);

        pstoreu(r0, R0);
        pstoreu(r1, R1);
        if(nr==4) pstoreu(r2, R2);
        if(nr==4) pstoreu(r3, R3);
      }
      for(Index i=peeled_mc2; i<rows; i++)
      {
        const LhsScalar* blA = &blockA[i*strideA+offsetA];

        // gets a 1 x nr res block as registers
        ResScalar C0(0), C1(0), C2(0), C3(0);
        const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
        for(Index k=0; k<depth; k++)
        {
          if(nr==2)
          {
            LhsScalar A0;
            RhsScalar B_0, B1;

            A0 = blA[k];

            B_0 = blB[0];
            B1  = blB[1];
            MADD(cj,A0,B_0,C0,B_0);
            MADD(cj,A0,B1,C1,B1);

            blB += 2;
          }
          else
          {
            LhsScalar A0;
            RhsScalar B_0, B1, B2, B3;

            A0 = blA[k];

            B_0 = blB[0];
            B1  = blB[1];
            B2  = blB[2];
            B3  = blB[3];

            MADD(cj,A0,B_0,C0,B_0);
            MADD(cj,A0,B1,C1,B1);
            MADD(cj,A0,B2,C2,B2);
            MADD(cj,A0,B3,C3,B3);

            blB += 4;
          }
        }
        res[(j2+0)*resStride + i] += alpha*C0;
        res[(j2+1)*resStride + i] += alpha*C1;
        if(nr==4) res[(j2+2)*resStride + i] += alpha*C2;
        if(nr==4) res[(j2+3)*resStride + i] += alpha*C3;
      }
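
      // The scalar loop above covers the leftover rows that fit neither a
      // full mr block nor a single packet; it reads the packed rhs directly
      // (blockB) since scalar MADDs need no unpacking/broadcast.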
    }

    // process remaining rhs/res columns one at a time
    // => do the same but with nr==1
    for(Index j2=packet_cols; j2<cols; j2++)
    {
      // unpack B
      traits.unpackRhs(depth, &blockB[j2*strideB+offsetB], unpackedB);
      for(Index i=0; i<peeled_mc; i+=mr)
      {
        const LhsScalar* blA = &blockA[i*strideA+offsetA*mr];

        // get res block as registers
        AccPacket C0, C4;
        traits.initAcc(C0);
        traits.initAcc(C4);

        const RhsScalar* blB = unpackedB;
        for(Index k=0; k<depth; k++)
        {
          LhsPacket A0, A1;
          RhsPacket B_0;
          RhsPacket T0;

          traits.loadLhs(&blA[0*LhsProgress], A0);
          traits.loadLhs(&blA[1*LhsProgress], A1);
          traits.loadRhs(&blB[0*RhsProgress], B_0);
          traits.madd(A0,B_0,C0,T0);
          traits.madd(A1,B_0,C4,B_0);

          blB += RhsProgress;
          blA += 2*LhsProgress;
        }
        ResPacket R0, R4;
        ResPacket alphav = pset1<ResPacket>(alpha);

        ResScalar* r0 = &res[(j2+0)*resStride + i];

        R0 = ploadu<ResPacket>(r0);
        R4 = ploadu<ResPacket>(r0+ResPacketSize);

        traits.acc(C0, alphav, R0);
        traits.acc(C4, alphav, R4);

        pstoreu(r0,               R0);
        pstoreu(r0+ResPacketSize, R4);
      }
      if(rows-peeled_mc>=LhsProgress)
      {
        Index i = peeled_mc;
        const LhsScalar* blA = &blockA[i*strideA+offsetA*LhsProgress];

        AccPacket C0;
        traits.initAcc(C0);

        const RhsScalar* blB = unpackedB;
        for(Index k=0; k<depth; k++)
        {
          LhsPacket A0;
          RhsPacket B_0;
          traits.loadLhs(blA, A0);
          traits.loadRhs(blB, B_0);
          traits.madd(A0, B_0, C0, B_0);
          blB += RhsProgress;
          blA += LhsProgress;
        }

        ResPacket alphav = pset1<ResPacket>(alpha);
        ResPacket R0 = ploadu<ResPacket>(&res[(j2+0)*resStride + i]);
        traits.acc(C0, alphav, R0);
        pstoreu(&res[(j2+0)*resStride + i], R0);
      }
      for(Index i=peeled_mc2; i<rows; i++)
      {
        const LhsScalar* blA = &blockA[i*strideA+offsetA];

        // gets a 1 x 1 res block as registers
        ResScalar C0(0);
        const RhsScalar* blB = &blockB[j2*strideB+offsetB];
        for(Index k=0; k<depth; k++)
        {
          LhsScalar A0 = blA[k];
          RhsScalar B_0 = blB[k];
          MADD(cj, A0, B_0, C0, B_0);
        }
        res[(j2+0)*resStride + i] += alpha*C0;
      }
    }
  }
};
// pack a block of the lhs into contiguous memory, slab by slab of Pack1 rows,
// so that the kernel can stream it with aligned loads
template<typename Scalar, typename Index, int Pack1, int Pack2, int StorageOrder, bool Conjugate, bool PanelMode>
struct gemm_pack_lhs
{
  EIGEN_DONT_INLINE void operator()(Scalar* blockA, const Scalar* EIGEN_RESTRICT _lhs, Index lhsStride,
                                    Index depth, Index rows, Index stride=0, Index offset=0)
  {
    typedef typename packet_traits<Scalar>::type Packet;
    enum { PacketSize = packet_traits<Scalar>::size };

    EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK LHS");
    eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
    eigen_assert( (StorageOrder==RowMajor) || ((Pack1%PacketSize)==0 && Pack1<=4*PacketSize) );
    conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
    const_blas_data_mapper<Scalar, Index, StorageOrder> lhs(_lhs,lhsStride);
    Index count = 0;
    Index peeled_mc = (rows/Pack1)*Pack1;
    for(Index i=0; i<peeled_mc; i+=Pack1)
    {
      if(PanelMode) count += Pack1 * offset;

      if(StorageOrder==ColMajor)
      {
        for(Index k=0; k<depth; k++)
        {
          Packet A, B, C, D;
          if(Pack1>=1*PacketSize) A = ploadu<Packet>(&lhs(i+0*PacketSize, k));
          if(Pack1>=2*PacketSize) B = ploadu<Packet>(&lhs(i+1*PacketSize, k));
          if(Pack1>=3*PacketSize) C = ploadu<Packet>(&lhs(i+2*PacketSize, k));
          if(Pack1>=4*PacketSize) D = ploadu<Packet>(&lhs(i+3*PacketSize, k));
          if(Pack1>=1*PacketSize) { pstore(blockA+count, cj.pconj(A)); count+=PacketSize; }
          if(Pack1>=2*PacketSize) { pstore(blockA+count, cj.pconj(B)); count+=PacketSize; }
          if(Pack1>=3*PacketSize) { pstore(blockA+count, cj.pconj(C)); count+=PacketSize; }
          if(Pack1>=4*PacketSize) { pstore(blockA+count, cj.pconj(D)); count+=PacketSize; }
        }
      }
      else
      {
        for(Index k=0; k<depth; k++)
        {
          Index w=0;
          for(; w<Pack1-3; w+=4)
          {
            Scalar a(cj(lhs(i+w+0, k))),
                   b(cj(lhs(i+w+1, k))),
                   c(cj(lhs(i+w+2, k))),
                   d(cj(lhs(i+w+3, k)));
            blockA[count++] = a;
            blockA[count++] = b;
            blockA[count++] = c;
            blockA[count++] = d;
          }
          if(Pack1%4)
            for(; w<Pack1; ++w)
              blockA[count++] = cj(lhs(i+w, k));
        }
      }
      if(PanelMode) count += Pack1 * (stride-offset-depth);
    }
    if(rows-peeled_mc>=Pack2)
    {
      if(PanelMode) count += Pack2*offset;
      for(Index k=0; k<depth; k++)
        for(Index w=0; w<Pack2; w++)
          blockA[count++] = cj(lhs(peeled_mc+w, k));
      if(PanelMode) count += Pack2 * (stride-offset-depth);
      peeled_mc += Pack2;
    }
    for(Index i=peeled_mc; i<rows; i++)
    {
      if(PanelMode) count += offset;
      for(Index k=0; k<depth; k++)
        blockA[count++] = cj(lhs(i, k));
      if(PanelMode) count += (stride-offset-depth);
    }
  }
};
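
// Resulting packed lhs layout (ColMajor path, illustrated for Pack1==4): each
// Pack1-row slab stores its depth column-slices contiguously,
//
//   a00 a10 a20 a30 | a01 a11 a21 a31 | a02 a12 a22 a32 | ...
//
// so the kernel can stream the block with aligned loads, one packet per
// LhsProgress step.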
// copy a complete panel of the rhs while interleaving its nr columns;
// this version is optimized for column major matrices
template<typename Scalar, typename Index, int nr, bool Conjugate, bool PanelMode>
struct gemm_pack_rhs<Scalar, Index, nr, ColMajor, Conjugate, PanelMode>
{
  typedef typename packet_traits<Scalar>::type Packet;
  enum { PacketSize = packet_traits<Scalar>::size };
  EIGEN_DONT_INLINE void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride,
                                    Index depth, Index cols, Index stride=0, Index offset=0)
  {
    EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS COLMAJOR");
    eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
    conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
    Index packet_cols = (cols/nr) * nr;
    Index count = 0;
    for(Index j2=0; j2<packet_cols; j2+=nr)
    {
      // skip what we have before
      if(PanelMode) count += nr * offset;
      const Scalar* b0 = &rhs[(j2+0)*rhsStride];
      const Scalar* b1 = &rhs[(j2+1)*rhsStride];
      const Scalar* b2 = &rhs[(j2+2)*rhsStride];
      const Scalar* b3 = &rhs[(j2+3)*rhsStride];
      for(Index k=0; k<depth; k++)
      {
        blockB[count+0] = cj(b0[k]);
        blockB[count+1] = cj(b1[k]);
        if(nr==4) blockB[count+2] = cj(b2[k]);
        if(nr==4) blockB[count+3] = cj(b3[k]);
        count += nr;
      }
      // skip what we have after
      if(PanelMode) count += nr * (stride-offset-depth);
    }
    // copy the remaining columns one at a time (nr==1)
    for(Index j2=packet_cols; j2<cols; ++j2)
    {
      if(PanelMode) count += offset;
      const Scalar* b0 = &rhs[(j2+0)*rhsStride];
      for(Index k=0; k<depth; k++)
      {
        blockB[count] = cj(b0[k]);
        count += 1;
      }
      if(PanelMode) count += (stride-offset-depth);
    }
  }
};
// this version is optimized for row major matrices
template<typename Scalar, typename Index, int nr, bool Conjugate, bool PanelMode>
struct gemm_pack_rhs<Scalar, Index, nr, RowMajor, Conjugate, PanelMode>
{
  enum { PacketSize = packet_traits<Scalar>::size };
  EIGEN_DONT_INLINE void operator()(Scalar* blockB, const Scalar* rhs, Index rhsStride,
                                    Index depth, Index cols, Index stride=0, Index offset=0)
  {
    EIGEN_ASM_COMMENT("EIGEN PRODUCT PACK RHS ROWMAJOR");
    eigen_assert(((!PanelMode) && stride==0 && offset==0) || (PanelMode && stride>=depth && offset<=stride));
    conj_if<NumTraits<Scalar>::IsComplex && Conjugate> cj;
    Index packet_cols = (cols/nr) * nr;
    Index count = 0;
    for(Index j2=0; j2<packet_cols; j2+=nr)
    {
      // skip what we have before
      if(PanelMode) count += nr * offset;
      for(Index k=0; k<depth; k++)
      {
        const Scalar* b0 = &rhs[k*rhsStride + j2];
        blockB[count+0] = cj(b0[0]);
        blockB[count+1] = cj(b0[1]);
        if(nr==4) blockB[count+2] = cj(b0[2]);
        if(nr==4) blockB[count+3] = cj(b0[3]);
        count += nr;
      }
      // skip what we have after
      if(PanelMode) count += nr * (stride-offset-depth);
    }
    // copy the remaining columns one at a time (nr==1)
    for(Index j2=packet_cols; j2<cols; ++j2)
    {
      if(PanelMode) count += offset;
      const Scalar* b0 = &rhs[j2];
      for(Index k=0; k<depth; k++)
      {
        blockB[count] = cj(b0[k*rhsStride]);
        count += 1;
      }
      if(PanelMode) count += stride-offset-depth;
    }
  }
};
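
// Both gemm_pack_rhs specializations emit the same layout: for each panel of
// nr columns, depth consecutive groups of nr coefficients
// b0[k] b1[k] [b2[k] b3[k]], i.e. the rhs panel is interleaved by rows; only
// the traversal of the source differs between the ColMajor and RowMajor
// variants.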
} // end namespace internal

/** \returns the currently set level 1 cpu cache size (in bytes) used to estimate the ideal blocking size parameters.
  * \sa setCpuCacheSizes */
inline std::ptrdiff_t l1CacheSize()
{
  std::ptrdiff_t l1, l2;
  internal::manage_caching_sizes(GetAction, &l1, &l2);
  return l1;
}

/** \returns the currently set level 2 cpu cache size (in bytes) used to estimate the ideal blocking size parameters.
  * \sa setCpuCacheSizes */
inline std::ptrdiff_t l2CacheSize()
{
  std::ptrdiff_t l1, l2;
  internal::manage_caching_sizes(GetAction, &l1, &l2);
  return l2;
}

/** Set the cpu L1 and L2 cache sizes (in bytes).
  * These values are used to adjust the size of the blocks
  * for the algorithms working per blocks.
  * \sa computeProductBlockingSizes */
inline void setCpuCacheSizes(std::ptrdiff_t l1, std::ptrdiff_t l2)
{
  internal::manage_caching_sizes(SetAction, &l1, &l2);
}
} // end namespace Eigen

#endif // EIGEN_GENERAL_BLOCK_PANEL_H