// SHOGUN 6.0.0 — LinalgNamespace.h
// (See the generated Shogun documentation for this file.)
1 /*
2  * Copyright (c) 2016, Shogun-Toolbox e.V. <shogun-team@shogun-toolbox.org>
3  * All rights reserved.
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * 1. Redistributions of source code must retain the above copyright notice,
8  * this list of conditions and the following disclaimer.
9  *
10  * 2. Redistributions in binary form must reproduce the above copyright
11  * notice, this list of conditions and the following disclaimer in the
12  * documentation and/or other materials provided with the distribution.
13  *
14  * 3. Neither the name of the copyright holder nor the names of its
15  * contributors may be used to endorse or promote products derived from
16  * this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28  * POSSIBILITY OF SUCH DAMAGE.
29  *
30  * Authors: 2016 Pan Deng, Soumyajit De, Heiko Strathmann, Viktor Gal
31  */
32 
#ifndef LINALG_NAMESPACE_H_
#define LINALG_NAMESPACE_H_

#include <type_traits>
39 namespace shogun
40 {
41 
42 namespace linalg
43 {
44 
51 template <typename T, template <typename> class Container>
52 LinalgBackendBase* infer_backend(const Container<T>& a)
53 {
54  if (a.on_gpu())
55  {
56  if (sg_linalg->get_gpu_backend())
57  return sg_linalg->get_gpu_backend();
58  else
59  {
60  SG_SERROR("Vector or matrix is on GPU but no GPU backend registered. \
61  This can happen if the GPU backend was de-activated \
62  after memory has been transferred to GPU.\n");
63  return NULL;
64  }
65  }
66  else
67  return sg_linalg->get_cpu_backend();
68 }
69 
78 template <typename T, template <typename> class Container>
79 LinalgBackendBase* infer_backend(const Container<T>& a, const Container<T>& b)
80 {
81  if (a.on_gpu() && b.on_gpu())
82  {
83  if (sg_linalg->get_gpu_backend())
84  return sg_linalg->get_gpu_backend();
85  else
86  {
87  SG_SERROR("Vector or matrix is on GPU but no GPU backend registered. \
88  This can happen if the GPU backend was de-activated \
89  after memory has been transferred to GPU.\n");
90  return NULL;
91  }
92  }
93  else if (a.on_gpu() || b.on_gpu())
94  {
95  SG_SERROR("Cannot operate with first vector/matrix on_gpu flag(%d) \
96  and second vector/matrix on_gpu flag (%d).\n",
97  a.on_gpu(), b.on_gpu());
98  return NULL;
99  }
100  else
101  return sg_linalg->get_cpu_backend();
102 }
103 
111 template <typename T>
113 {
114  sg_linalg->m_gpu_transfer.lock();
115 
116  if (a.on_gpu())
117  {
118  if (sg_linalg->get_linalg_warnings())
119  SG_SWARNING("The vector is already on GPU.\n");
120  }
121  else
122  {
123  LinalgBackendBase* gpu_backend = sg_linalg->get_gpu_backend();
124 
125  if (gpu_backend)
126  b = SGVector<T>(gpu_backend->to_gpu(a), a.vlen);
127  else
128  {
129  if (sg_linalg->get_linalg_warnings())
130  SG_SWARNING("Trying to access GPU memory\
131  without GPU backend registered.\n");
132  b = a;
133  }
134  }
135 
136  sg_linalg->m_gpu_transfer.unlock();
137 }
138 
146 template <typename T>
148 {
149  sg_linalg->m_gpu_transfer.lock();
150 
151  if (a.on_gpu())
152  {
153  if (sg_linalg->get_linalg_warnings())
154  SG_SWARNING("The matrix is already on GPU.\n");
155  }
156  else
157  {
158  LinalgBackendBase* gpu_backend = sg_linalg->get_gpu_backend();
159 
160  if (gpu_backend)
161  b = SGMatrix<T>(gpu_backend->to_gpu(a), a.num_rows, a.num_cols);
162  else
163  {
164  if (sg_linalg->get_linalg_warnings())
165  SG_SWARNING("Trying to access GPU memory\
166  without GPU backend registered.\n");
167  b = a;
168  }
169  }
170 
171  sg_linalg->m_gpu_transfer.unlock();
172 }
173 
/** In-place transfer of a vector or matrix to GPU memory.
 *
 * @param a vector or matrix to move to GPU; overwritten with the GPU copy
 */
template <typename T, template<typename> class Container>
void to_gpu(Container<T>& a)
{
	// Delegate to the two-argument overload, writing back into the input.
	to_gpu(a, a);
}
184 
192 template <typename T>
194 {
195  sg_linalg->m_gpu_transfer.lock();
196  if (a.on_gpu())
197  {
198  LinalgBackendBase* gpu_backend = sg_linalg->get_gpu_backend();
199  if (gpu_backend)
200  {
201  typedef typename std::aligned_storage<sizeof(T), alignof(T)>::type aligned_t;
202  T* data;
203  data = reinterpret_cast<T*>(SG_MALLOC(aligned_t, a.size()));
204  gpu_backend->from_gpu(a, data);
205  b = SGVector<T>(data, a.size());
206  }
207  else
208  SG_SERROR("Data memory on GPU but no GPU backend registered. \
209  This can happen if the GPU backend was de-activated \
210  after memory has been transferred to GPU.\n");
211  }
212  else
213  {
214  if (sg_linalg->get_linalg_warnings())
215  SG_SWARNING("The data is already on CPU.\n");
216  b = a;
217  }
218 
219  sg_linalg->m_gpu_transfer.unlock();
220 }
221 
229 template <typename T>
231 {
232  sg_linalg->m_gpu_transfer.lock();
233  if (a.on_gpu())
234  {
235  LinalgBackendBase* gpu_backend = sg_linalg->get_gpu_backend();
236  if (gpu_backend)
237  {
238  typedef typename std::aligned_storage<sizeof(T), alignof(T)>::type aligned_t;
239  T* data;
240  data = reinterpret_cast<T*>(SG_MALLOC(aligned_t, a.num_rows*a.num_cols));
241  gpu_backend->from_gpu(a, data);
242  b = SGMatrix<T>(data, a.num_rows, a.num_cols);
243  }
244  else
245  SG_SERROR("Data memory on GPU but no GPU backend registered. \
246  This can happen if the GPU backend was de-activated \
247  after memory has been transferred to GPU.\n");
248  }
249  else
250  {
251  if (sg_linalg->get_linalg_warnings())
252  SG_SWARNING("The data is already on CPU.\n");
253  b = a;
254  }
255 
256  sg_linalg->m_gpu_transfer.unlock();
257 }
258 
/** In-place transfer of a vector or matrix from GPU to CPU memory.
 *
 * @param a vector or matrix to move to CPU; overwritten with the CPU copy
 */
template <typename T, template<typename> class Container>
void from_gpu(Container<T>& a)
{
	// Delegate to the two-argument overload, writing back into the input.
	from_gpu(a, a);
}
270 
283 template <typename T>
284 void add(SGVector<T>& a, SGVector<T>& b, SGVector<T>& result, T alpha=1, T beta=1)
285 {
286  REQUIRE(a.vlen == b.vlen,
287  "Length of vector a (%d) doesn't match vector b (%d).\n", a.vlen, b.vlen);
288  REQUIRE(result.vlen == b.vlen,
289  "Length of vector result (%d) doesn't match vector a (%d).\n",
290  result.vlen, a.vlen);
291 
292  REQUIRE(!(result.on_gpu()^a.on_gpu()),
293  "Cannot operate with vector result on_gpu (%d) and vector a on_gpu (%d).\n",
294  result.on_gpu(), a.on_gpu());
295  REQUIRE(!(result.on_gpu()^b.on_gpu()),
296  "Cannot operate with vector result on_gpu (%d) and vector b on_gpu (%d).\n",
297  result.on_gpu(), b.on_gpu());
298 
299  infer_backend(a, b)->add(a, b, alpha, beta, result);
300 }
301 
314 template <typename T>
315 void add(SGMatrix<T>& a, SGMatrix<T>& b, SGMatrix<T>& result, T alpha=1, T beta=1)
316 {
317  REQUIRE((a.num_rows == b.num_rows),
318  "Number of rows of matrix a (%d) must match matrix b (%d).\n",
319  a.num_rows, b.num_rows);
320  REQUIRE((a.num_cols == b.num_cols),
321  "Number of columns of matrix a (%d) must match matrix b (%d).\n",
322  a.num_cols, b.num_cols);
323 
324  REQUIRE(!(result.on_gpu()^a.on_gpu()),
325  "Cannot operate with matrix result on_gpu (%d) and matrix a on_gpu (%d).\n",
326  result.on_gpu(), a.on_gpu());
327  REQUIRE(!(result.on_gpu()^b.on_gpu()),
328  "Cannot operate with matrix result on_gpu (%d) and matrix b on_gpu (%d).\n",
329  result.on_gpu(), b.on_gpu());
330 
331  infer_backend(a, b)->add(a, b, alpha, beta, result);
332 }
333 
/** Element-wise scaled addition returning a new container:
 * returns alpha * a + beta * b.
 *
 * @param a first operand (SGVector or SGMatrix)
 * @param b second operand
 * @param alpha scaling factor for \p a (default 1)
 * @param beta scaling factor for \p b (default 1)
 * @return newly allocated result
 */
template <typename T, template<typename> class Container>
Container<T> add(Container<T>& a, Container<T>& b, T alpha=1, T beta=1)
{
	// Clone a to obtain a result container of matching shape and memory space.
	Container<T> result = a.clone();
	add(a, b, result, alpha, beta);
	return result;
}
351 
361 template <typename T>
362 SGMatrix<T> cholesky_factor(const SGMatrix<T>& A, const bool lower=true)
363 {
364  return infer_backend(A)->cholesky_factor(A, lower);
365 }
366 
377 template <typename T>
379  const bool lower=true)
380 {
381  return infer_backend(L, SGMatrix<T>(b))->cholesky_solver(L, b, lower);
382 }
383 
392 template <typename T>
393 T dot(const SGVector<T>& a, const SGVector<T>& b)
394 {
395  REQUIRE(a.vlen == b.vlen,
396  "Length of vector a (%d) doesn't match vector b (%d).\n", a.vlen, b.vlen);
397  return infer_backend(a, b)->dot(a, b);
398 }
399 
412 template <typename T>
414 {
415  REQUIRE(a.m_row_size == b.m_row_size && a.m_col_size == b.m_col_size,
416  "Dimension mismatch! A(%d x %d) vs B(%d x %d)\n",
417  a.m_row_size, a.m_col_size, b.m_row_size, b.m_col_size);
418  REQUIRE(a.m_row_size == result.num_rows && a.m_col_size == result.num_cols,
419  "Dimension mismatch! A(%d x %d) vs result(%d x %d)\n",
420  a.m_row_size, a.m_col_size, result.num_rows, result.num_cols);
421 
422  REQUIRE(!result.on_gpu(), "Cannot operate with matrix result on_gpu (%d) \
423  as matrix blocks are on CPU.\n", result.on_gpu());
424 
425  sg_linalg->get_cpu_backend()->element_prod(a, b, result);
426 }
427 
437 template <typename T>
439 {
440  REQUIRE(a.m_row_size == b.m_row_size && a.m_col_size == b.m_col_size,
441  "Dimension mismatch! A(%d x %d) vs B(%d x %d)\n",
442  a.m_row_size, a.m_col_size, b.m_row_size, b.m_col_size);
443 
444  SGMatrix<T> result(a.m_row_size, a.m_col_size);
445  result.zero();
446 
447  element_prod(a, b, result);
448 
449  return result;
450 }
451 
462 template <typename T>
464 {
465  REQUIRE(a.num_rows == b.num_rows && a.num_cols == b.num_cols,
466  "Dimension mismatch! A(%d x %d) vs B(%d x %d)\n",
467  a.num_rows, a.num_cols, b.num_rows, b.num_cols);
468  REQUIRE(a.num_rows == result.num_rows && a.num_cols == result.num_cols,
469  "Dimension mismatch! A(%d x %d) vs result(%d x %d)\n",
470  a.num_rows, a.num_cols, result.num_rows, result.num_cols);
471 
472  REQUIRE(!(result.on_gpu()^a.on_gpu()),
473  "Cannot operate with matrix result on_gpu (%d) and \
474  matrix A on_gpu (%d).\n", result.on_gpu(), a.on_gpu());
475  REQUIRE(!(result.on_gpu()^b.on_gpu()),
476  "Cannot operate with matrix result on_gpu (%d) and \
477  matrix B on_gpu (%d).\n", result.on_gpu(), b.on_gpu());
478 
479  infer_backend(a, b)->element_prod(a, b, result);
480 }
481 
490 template <typename T>
492 {
493  REQUIRE(a.num_rows == b.num_rows && a.num_cols == b.num_cols,
494  "Dimension mismatch! A(%d x %d) vs B(%d x %d)\n",
495  a.num_rows, a.num_cols, b.num_rows, b.num_cols);
496 
497  SGMatrix<T> result;
498  result = a.clone();
499 
500  element_prod(a, b, result);
501 
502  return result;
503 }
504 
515 template <typename T>
516 void matrix_prod(SGMatrix<T>& A, SGVector<T>& b, SGVector<T>& result, bool transpose=false)
517 {
518  if (transpose)
519  {
520  REQUIRE(A.num_rows == b.vlen, "Row number of Matrix A (%d) doesn't match \
521  length of vector b (%d).\n", A.num_rows, b.vlen);
522  REQUIRE(result.vlen == A.num_cols, "Length of vector result (%d) doesn't match \
523  column number of Matrix A (%d).\n", result.vlen, A.num_cols);
524  }
525  else
526  {
527  REQUIRE(A.num_cols == b.vlen, "Column number of Matrix A (%d) doesn't match \
528  length of vector b (%d).\n", A.num_cols, b.vlen);
529  REQUIRE(result.vlen == A.num_rows, "Length of vector result (%d) doesn't match \
530  row number of Matrix A (%d).\n", result.vlen, A.num_rows);
531  }
532 
533  REQUIRE(!(result.on_gpu()^A.on_gpu()),
534  "Cannot operate with vector result on_gpu (%d) and vector a on_gpu (%d).\n",
535  result.on_gpu(), A.on_gpu());
536  REQUIRE(!(result.on_gpu()^b.on_gpu()),
537  "Cannot operate with vector result on_gpu (%d) and vector b on_gpu (%d).\n",
538  result.on_gpu(), b.on_gpu());
539 
540  infer_backend(A, SGMatrix<T>(b))->matrix_prod(A, b, result, transpose, false);
541 }
542 
551 template <typename T>
552 SGVector<T> matrix_prod(SGMatrix<T>& A, SGVector<T>& b, bool transpose=false)
553 {
554  SGVector<T> result;
555  if (transpose)
556  {
557  REQUIRE(A.num_rows == b.vlen, "Row number of Matrix A (%d) doesn't match \
558  length of vector b (%d).\n", A.num_rows, b.vlen);
559  result = SGVector<T>(A.num_cols);
560  }
561  else
562  {
563  REQUIRE(A.num_cols == b.vlen, "Column number of Matrix A (%d) doesn't match \
564  length of vector b (%d).\n", A.num_cols, b.vlen);
565  result = SGVector<T>(A.num_rows);
566  }
567 
568  if (A.on_gpu())
569  to_gpu(result);
570 
571  matrix_prod(A, b, result, transpose);
572  return result;
573 }
574 
586 template <typename T>
588  bool transpose_A=false, bool transpose_B=false)
589 {
590  REQUIRE(!(result.on_gpu()^A.on_gpu()),
591  "Cannot operate with matrix result on_gpu (%d) and \
592  matrix A on_gpu (%d).\n", result.on_gpu(), A.on_gpu());
593  REQUIRE(!(result.on_gpu()^B.on_gpu()),
594  "Cannot operate with matrix result on_gpu (%d) and \
595  matrix B on_gpu (%d).\n", result.on_gpu(), B.on_gpu());
596 
597  if (transpose_A)
598  {
599  REQUIRE(A.num_cols == result.num_rows, "Number of columns for A (%d) and \
600  number of rows for result (%d) should be equal!\n", A.num_cols, result.num_rows);
601  if (transpose_B)
602  {
603  REQUIRE(A.num_rows == B.num_cols, "Number of rows for A (%d) and \
604  number of columns for B (%d) should be equal!\n", A.num_rows, B.num_cols);
605  REQUIRE(B.num_rows == result.num_cols, "Number of rows for B (%d) and \
606  number of columns for result (%d) should be equal!\n",
607  B.num_rows, result.num_cols);
608  }
609  else
610  {
611  REQUIRE(A.num_rows == B.num_rows, "Number of rows for A (%d) and \
612  number of rows for B (%d) should be equal!\n", A.num_rows, B.num_rows);
613  REQUIRE(B.num_cols == result.num_cols, "Number of columns for B (%d) and \
614  number of columns for result (%d) should be equal!\n",
615  B.num_cols, result.num_cols);
616  }
617  }
618  else
619  {
620  REQUIRE(A.num_rows == result.num_rows, "Number of rows for A (%d) and \
621  number of rows for result (%d) should be equal!\n", A.num_rows, result.num_rows);
622  if (transpose_B)
623  {
624  REQUIRE(A.num_cols == B.num_cols, "Number of columns for A (%d) and \
625  number of columns for B (%d) should be equal!\n", A.num_cols, B.num_cols);
626  REQUIRE(B.num_rows == result.num_cols, "Number of rows for B (%d) and \
627  number of columns for result (%d) should be equal!\n",
628  B.num_rows, result.num_cols);
629  }
630  else
631  {
632  REQUIRE(A.num_cols == B.num_rows, "Number of columns for A (%d) and \
633  number of rows for B (%d) should be equal!\n", A.num_cols, B.num_rows);
634  REQUIRE(B.num_cols == result.num_cols, "Number of columns for B (%d) and \
635  number of columns for result (%d) should be equal!\n",
636  B.num_cols, result.num_cols);
637  }
638  }
639 
640  infer_backend(A, B)->matrix_prod(A, B, result, transpose_A, transpose_B);
641 }
642 
654 template <typename T>
656  bool transpose_A=false, bool transpose_B=false)
657 {
658  SGMatrix<T> result;
659 
660  if (transpose_A & transpose_B)
661  {
662  REQUIRE(A.num_rows == B.num_cols, "Number of rows for A (%d) and \
663  number of columns for B (%d) should be equal!\n", A.num_rows, B.num_cols);
664  result = SGMatrix<T>(A.num_cols, B.num_rows);
665  }
666  else if (transpose_A)
667  {
668  REQUIRE(A.num_rows == B.num_rows, "Number of rows for A (%d) and \
669  number of rows for B (%d) should be equal!\n", A.num_rows, B.num_rows);
670  result = SGMatrix<T>(A.num_cols, B.num_cols);
671  }
672  else if (transpose_B)
673  {
674  REQUIRE(A.num_cols == B.num_cols, "Number of columns for A (%d) and \
675  number of columns for B (%d) should be equal!\n", A.num_cols, B.num_cols);
676  result = SGMatrix<T>(A.num_rows, B.num_rows);
677  }
678  else
679  {
680  REQUIRE(A.num_cols == B.num_rows, "Number of columns for A (%d) and \
681  number of rows for B (%d) should be equal!\n", A.num_cols, B.num_rows);
682  result = SGMatrix<T>(A.num_rows, B.num_cols);
683  }
684 
685  if (A.on_gpu())
686  to_gpu(result);
687 
688  matrix_prod(A, B, result, transpose_A, transpose_B);
689 
690  return result;
691 }
692 
699 template<typename T, template<typename> class Container>
700 T max(const Container<T>& a)
701 {
702  return infer_backend(a)->max(a);
703 }
704 
711 template<typename T, template<typename> class Container>
712 typename std::enable_if<!std::is_same<T, complex128_t>::value, float64_t>::type
713 mean(const Container<T>& a)
714 {
715  REQUIRE(a.size() > 0, "Vector/Matrix cannot be empty!\n");
716  return infer_backend(a)->mean(a);
717 }
718 
725 template<template<typename> class Container>
726 complex128_t mean(const Container<complex128_t>& a)
727 {
728  REQUIRE(a.size() > 0, "Vector/Matrix cannot be empty!\n");
729  return infer_backend(a)->mean(a);
730 }
731 
738 template <typename T, template<typename> class Container>
739 void range_fill(Container<T>& a, const T start=0)
740 {
741  infer_backend(a)->range_fill(a, start);
742 }
743 
754 template <typename T>
755 void scale(SGVector<T>& a, SGVector<T>& result, T alpha=1)
756 {
757  REQUIRE(result.vlen == a.vlen, "Length of vector result (%d) doesn't match vector a (%d).\n", result.vlen, a.vlen);
758  infer_backend(a, result)->scale(a, alpha, result);
759 }
760 
771 template <typename T>
772 void scale(SGMatrix<T>& A, SGMatrix<T>& result, T alpha=1)
773 {
774  REQUIRE((A.num_rows == result.num_rows), "Number of rows of matrix A (%d) must match matrix result (%d).\n",
775  A.num_rows, result.num_rows);
776  REQUIRE((A.num_cols == result.num_cols), "Number of columns of matrix A (%d) must match matrix result (%d).\n",
777  A.num_cols, result.num_cols);
778  infer_backend(A, result)->scale(A, alpha, result);
779 }
780 
/** Scales a vector or matrix, returning a new container: returns alpha * a.
 *
 * @param a input container
 * @param alpha scaling factor (default 1)
 * @return newly allocated scaled copy
 */
template<typename T, template<typename> class Container>
Container<T> scale(Container<T>& a, T alpha=1)
{
	// Clone a to obtain a result container of matching shape and memory space.
	Container<T> result = a.clone();
	scale(a, result, alpha);
	return result;
}
796 
803 template <typename T, template<typename> class Container>
804 void set_const(Container<T>& a, T value)
805 {
806  infer_backend(a)->set_const(a, value);
807 }
808 
816 template <typename T, template <typename> class Container>
817 T sum(const Container<T>& a, bool no_diag=false)
818 {
819  return infer_backend(a)->sum(a, no_diag);
820 }
821 
830 template <typename T>
831 T sum(const Block<SGMatrix<T>>& a, bool no_diag=false)
832 {
833  return sg_linalg->get_cpu_backend()->sum(a, no_diag);
834 }
835 
843 template <typename T>
844 T sum_symmetric(const SGMatrix<T>& a, bool no_diag=false)
845 {
846  REQUIRE(a.num_rows == a.num_cols, "Matrix is not square!\n");
847  return infer_backend(a)->sum_symmetric(a, no_diag);
848 }
849 
858 template <typename T>
859 T sum_symmetric(const Block<SGMatrix<T>>& a, bool no_diag=false)
860 {
861  REQUIRE(a.m_row_size == a.m_col_size, "Matrix is not square!\n");
862  return sg_linalg->get_cpu_backend()->sum_symmetric(a, no_diag);
863 }
864 
872 template <typename T>
873 SGVector<T> colwise_sum(const SGMatrix<T>& mat, bool no_diag=false)
874 {
875  return infer_backend(mat)->colwise_sum(mat, no_diag);
876 }
877 
886 template <typename T>
887 SGVector<T> colwise_sum(const Block<SGMatrix<T>>& a, bool no_diag=false)
888 {
889  return sg_linalg->get_cpu_backend()->colwise_sum(a, no_diag);
890 }
891 
899 template <typename T>
900 SGVector<T> rowwise_sum(const SGMatrix<T>& mat, bool no_diag=false)
901 {
902  return infer_backend(mat)->rowwise_sum(mat, no_diag);
903 }
904 
913 template <typename T>
914 SGVector<T> rowwise_sum(const Block<SGMatrix<T>>& a, bool no_diag=false)
915 {
916  return sg_linalg->get_cpu_backend()->rowwise_sum(a, no_diag);
917 }
918 
919 }
920 
921 }
922 
923 #endif //LINALG_NAMESPACE_H_
/* Doxygen cross-reference residue (HTML-extraction artifact), preserved as a comment:
 T sum_symmetric(const SGMatrix< T > &a, bool no_diag=false)
 std::complex< float64_t > complex128_t — common.h:77
 bool on_gpu() const — SGVector.h:83
 #define SG_SWARNING(...) — SGIO.h:177
 void scale(SGVector< T > &a, SGVector< T > &result, T alpha=1)
 SGMatrix< T > cholesky_factor(const SGMatrix< T > &A, const bool lower=true)
 bool on_gpu() const — SGMatrix.h:79
 SGVector< T > rowwise_sum(const SGMatrix< T > &mat, bool no_diag=false)
 void add(SGVector< T > &a, SGVector< T > &b, SGVector< T > &result, T alpha=1, T beta=1)
 #define REQUIRE(x,...) — SGIO.h:205
 T dot(const SGVector< T > &a, const SGVector< T > &b)
 void set_const(Container< T > &a, T value)
 Generic class Block which wraps a matrix class and contains block specific information,
 providing a uniform way to deal with matrix blocks for all supported backend matrices.
 LinalgBackendBase * infer_backend(const Container< T > &a)
 std::enable_if<!std::is_same< T, complex128_t >::value, float64_t >::type mean(const Container< T > &a)
 std::unique_ptr< SGLinalg > sg_linalg
 SGVector< T > colwise_sum(const SGMatrix< T > &mat, bool no_diag=false)
 SGMatrix< T > clone() const — SGMatrix.cpp:330
 SGVector< T > cholesky_solver(const SGMatrix< T > &L, const SGVector< T > &b, const bool lower=true)
 double float64_t — common.h:60
 void range_fill(Container< T > &a, const T start=0)
 index_t num_rows — SGMatrix.h:463
 shogun vector
 index_t num_cols — SGMatrix.h:465
 void matrix_prod(SGMatrix< T > &A, SGVector< T > &b, SGVector< T > &result, bool transpose=false)
 shogun matrix
 all of classes and functions are contained in the shogun namespace — class_list.h:18
 T sum(const Container< T > &a, bool no_diag=false)
 #define SG_SERROR(...) — SGIO.h:178
 Base interface of generic linalg methods and generic memory transfer methods.
 void to_gpu(SGVector< T > &a, SGVector< T > &b)
 void from_gpu(SGVector< T > &a, SGVector< T > &b)
 T max(const Container< T > &a)
 void element_prod(Block< SGMatrix< T >> &a, Block< SGMatrix< T >> &b, SGMatrix< T > &result)
 index_t vlen — SGVector.h:545
 int32_t size() const — SGVector.h:136
 SHOGUN Machine Learning Toolbox - Documentation
*/