SHOGUN  6.0.0
Kernel.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 1999-2009 Soeren Sonnenburg
8  * Written (W) 1999-2008 Gunnar Raetsch
9  * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
10  */
11 
12 #include <shogun/lib/config.h>
13 #include <shogun/lib/common.h>
14 #include <shogun/io/SGIO.h>
15 #include <shogun/io/File.h>
16 #include <shogun/lib/Time.h>
17 #include <shogun/lib/Signal.h>
18 
19 #include <shogun/base/Parallel.h>
20 
21 #include <shogun/kernel/Kernel.h>
24 #include <shogun/base/Parameter.h>
25 
27 
28 #include <string.h>
29 #ifndef _WIN32
30 #include <unistd.h>
31 #endif
33 
34 using namespace shogun;
35 
// NOTE(review): this file is a garbled doxygen listing -- the leading numbers on each
// line are the original Kernel.cpp line numbers, and gaps in that numbering mark lines
// lost in extraction. Comments below describe only what is visible.
// Default constructor. Its signature line (original line 36) was lost in extraction --
// presumably `CKernel::CKernel() : CSGObject()`; confirm against the original file.
// Delegates all member initialization to init().
37 {
38  init();
40 }
41 
// Constructor taking a kernel cache size. The size is clamped to a minimum of
// 10 (MB, per the cache_size parameter description) before being stored.
// NOTE(review): original line 50 is missing here -- the original likely performed
// additional setup (e.g. installing a default normalizer); confirm before relying on it.
42 CKernel::CKernel(int32_t size) : CSGObject()
43 {
44  init();
45 
46  if (size<10)
47  size=10;
48 
49  cache_size=size;
51 }
52 
53 
// Constructor taking left/right hand side features plus a cache size.
// Clamps the cache size as above, then forwards the features to init(lhs, rhs).
// NOTE(review): original lines 63 and 65 are missing from this listing.
54 CKernel::CKernel(CFeatures* p_lhs, CFeatures* p_rhs, int32_t size) : CSGObject()
55 {
56  init();
57 
58  if (size<10)
59  size=10;
60 
61  cache_size=size;
62 
64  init(p_lhs, p_rhs);
66 }
67 
// Destructor (signature line 68, `CKernel::~CKernel()` per the index below, was lost).
// Reports an error if the kernel is still initialized when destroyed, then logs deletion.
// NOTE(review): original lines 73-74 are missing (likely cleanup of the normalizer).
69 {
70  if (get_is_initialized())
71  SG_ERROR("Kernel still initialized on destruction.\n")
72 
75 
76  SG_INFO("Kernel deleted (%p).\n", this)
77 }
78 
79 
80 
// Initializes the kernel with left/right hand side feature sets.
// Takes temporary "safety" references on both features so that re-initialization
// with the same objects cannot destroy them mid-way, validates the inputs,
// stores them in lhs/rhs with proper reference counting (a single reference when
// l==r), and releases the safety references at the end.
// NOTE(review): several original lines are missing from this listing (94, 100,
// 104, 107, 119-123) -- these presumably held the compatibility-check call heads,
// removal of previous features, and num_lhs/num_rhs bookkeeping; confirm against
// the original Kernel.cpp before editing.
81 bool CKernel::init(CFeatures* l, CFeatures* r)
82 {
83  /* make sure that features are not deleted if same ones are used */
84  SG_REF(l);
85  SG_REF(r);
86 
87  //make sure features were indeed supplied
88  REQUIRE(l, "CKernel::init(%p, %p): Left hand side features required!\n", l, r)
89  REQUIRE(r, "CKernel::init(%p, %p): Right hand side features required!\n", l, r)
90 
91  //make sure features are compatible
92  if (l->support_compatible_class())
93  {
95  "Right hand side of features (%s) must be compatible with left hand side features (%s)\n",
96  l->get_name(), r->get_name());
97  }
98  else
99  {
101  "Right hand side of features (%s) must be compatible with left hand side features (%s)\n",
102  l->get_name(), r->get_name())
103  }
105 
106  //remove references to previous features
108 
109  //increase reference counts
110  SG_REF(l);
111  if (l==r)
112  lhs_equals_rhs=true;
113  else // l!=r
114  SG_REF(r);
115 
116  lhs=l;
117  rhs=r;
118 
121 
124 
125  /* unref "safety" refs from beginning */
126  SG_UNREF(r);
127  SG_UNREF(l);
128 
129  SG_DEBUG("leaving CKernel::init(%p, %p)\n", l, r)
130  return true;
131 }
132 
// set_normalizer (signature on missing line 133: `bool CKernel::set_normalizer(
// CKernelNormalizer* n)` per the index below). Takes a reference on the new
// normalizer, initializes it against this kernel if both feature sides are set,
// and stores it. Returns whether a non-NULL normalizer is now installed.
// NOTE(review): missing line 139 presumably released the previous normalizer.
134 {
135  SG_REF(n);
136  if (lhs && rhs)
137  n->init(this);
138 
140  normalizer=n;
141 
142  return (normalizer!=NULL);
143 }
144 
// get_normalizer (signature on missing line 145): returns the current normalizer.
// NOTE(review): missing line 147 presumably SG_REF'd the normalizer before
// returning -- callers may own a reference; confirm against the original.
146 {
148  return normalizer;
149 }
150 
// init_normalizer (signature on missing line 151): (re-)initializes the stored
// normalizer against this kernel; assumes normalizer is non-NULL.
152 {
153  return normalizer->init(this);
154 }
155 
// cleanup (signature on missing line 156): body line 158 was lost in extraction,
// so the visible block is empty -- it presumably released the normalizer.
157 {
159 }
160 
161 
162 
// Loads kernel state from the given file. The visible body is empty: original
// lines 165-166 were lost in extraction (per the index they were likely the
// SG_SET_LOCALE_C / SG_RESET_LOCALE guards) -- confirm against the original file.
163 void CKernel::load(CFile* loader)
164 {
167 }
168 
// Saves the full (dense) kernel matrix to the given writer as a float64 matrix.
// NOTE(review): missing lines 172/174 presumably wrapped the write in the
// SG_SET_LOCALE_C / SG_RESET_LOCALE locale guards.
169 void CKernel::save(CFile* writer)
170 {
171  SGMatrix<float64_t> k_matrix=get_kernel_matrix<float64_t>();
173  writer->set_matrix(k_matrix.matrix, k_matrix.num_rows, k_matrix.num_cols);
175 }
176 
// remove_lhs_and_rhs (signature on missing line 177): drops both feature sides.
// rhs is only unref'd when it is a distinct object from lhs (they share one
// reference when lhs_equals_rhs); counters and the equality flag are reset.
178 {
179  SG_DEBUG("entering CKernel::remove_lhs_and_rhs\n")
180  if (rhs!=lhs)
181  SG_UNREF(rhs);
182  rhs = NULL;
183  num_rhs=0;
184 
185  SG_UNREF(lhs);
186  lhs = NULL;
187  num_lhs=0;
188  lhs_equals_rhs=false;
189 
190 
191  SG_DEBUG("leaving CKernel::remove_lhs_and_rhs\n")
192 }
193 
// remove_lhs (signature on missing line 194): drops only the left hand side.
// When rhs aliased lhs it is nulled first so the single shared reference is
// released exactly once via SG_UNREF(lhs).
195 {
196  if (rhs==lhs)
197  rhs=NULL;
198  SG_UNREF(lhs);
199  lhs = NULL;
200  num_lhs=0;
201  lhs_equals_rhs=false;
202 
203 }
204 
// remove_rhs (signature on missing line 205): drops only the right hand side;
// unref only when rhs does not alias lhs (same sharing rule as above).
207 {
208  if (rhs!=lhs)
209  SG_UNREF(rhs);
210  rhs = NULL;
211  num_rhs=0;
212  lhs_equals_rhs=false;
213 
214 
215 }
216 
// Helper macro: prints the enumerator's name for the matching case label.
217 #define ENUM_CASE(n) case n: SG_INFO(#n " ") break;
218 
// list_kernel (signature on missing line 219): logs a one-line summary of this
// kernel -- address, name, weight, optimization type, then the kernel type,
// feature class and feature type via the ENUM_CASE switches.
// NOTE(review): nearly all ENUM_CASE lines inside the three switch statements
// (original lines 228-289, 294-315 except 304, 320-333) were lost in extraction.
220 {
221  SG_INFO("%p - \"%s\" weight=%1.2f OPT:%s", this, get_name(),
223  get_optimization_type()==FASTBUTMEMHUNGRY ? "FASTBUTMEMHUNGRY" :
224  "SLOWBUTMEMEFFICIENT");
225 
226  switch (get_kernel_type())
227  {
290  }
291 
292  switch (get_feature_class())
293  {
304  ENUM_CASE(C_WD)
316  }
317 
318  switch (get_feature_type())
319  {
334  }
335  SG_INFO("\n")
336 }
337 #undef ENUM_CASE
338 
// Default (unsupported) implementations of the linadd-optimization interface.
// Subclasses that support linear-in-alpha optimization override these; the base
// versions just raise an SG_ERROR. Several signature lines were lost in
// extraction (339, 346, 352, 358, 370, 375, 380) -- the index at the bottom of
// this listing gives them, e.g. `bool init_optimization(int32_t count,
// int32_t* IDX, float64_t* weights)`.
// init_optimization: continuation of the signature (first line, 339, missing).
340  int32_t count, int32_t *IDX, float64_t * weights)
341 {
342  SG_ERROR("kernel does not support linadd optimization\n")
343  return false ;
344 }
345 
// delete_optimization (signature on missing line 346).
347 {
348  SG_ERROR("kernel does not support linadd optimization\n")
349  return false;
350 }
351 
// compute_optimized (signature on missing line 352); returns 0 after erroring.
353 {
354  SG_ERROR("kernel does not support linadd optimization\n")
355  return 0;
356 }
357 
// compute_batch: continuation of the signature (first line, 358, missing).
359  int32_t num_vec, int32_t* vec_idx, float64_t* target, int32_t num_suppvec,
360  int32_t* IDX, float64_t* weights, float64_t factor)
361 {
362  SG_ERROR("kernel does not support batch computation\n")
363 }
364 
// add_to_normal: unsupported in the base class.
365 void CKernel::add_to_normal(int32_t vector_idx, float64_t weight)
366 {
367  SG_ERROR("kernel does not support linadd optimization, add_to_normal not implemented\n")
368 }
369 
// clear_normal (signature on missing line 370): unsupported in the base class.
371 {
372  SG_ERROR("kernel does not support linadd optimization, clear_normal not implemented\n")
373 }
374 
// get_num_subkernels (signature on missing line 375): a plain kernel has one.
376 {
377  return 1;
378 }
379 
// compute_by_subkernel: continuation of the signature (first line, 380, missing).
381  int32_t vector_idx, float64_t * subkernel_contrib)
382 {
383  SG_ERROR("kernel compute_by_subkernel not implemented\n")
384 }
385 
386 const float64_t* CKernel::get_subkernel_weights(int32_t &num_weights)
387 {
388  num_weights=1 ;
389  return &combined_kernel_weight ;
390 }
391 
// get_subkernel_weights() vector overload (signature on missing line 392,
// `SGVector<float64_t> CKernel::get_subkernel_weights()` per the index):
// wraps the single combined kernel weight in a non-owning SGVector of length 1.
393 {
394  int num_weights = 1;
395  const float64_t* weight = get_subkernel_weights(num_weights);
396  return SGVector<float64_t>(const_cast<float64_t*>(weight),1,false);
397 }
398 
// set_subkernel_weights (signature on missing line 399): a plain kernel accepts
// exactly one weight and stores it in combined_kernel_weight.
400 {
401  ASSERT(weights.vector)
402  if (weights.vlen!=1)
403  SG_ERROR("number of subkernel weights should be one ...\n")
404 
405  combined_kernel_weight = weights.vector[0] ;
406 }
407 
// obtain_from_generic (signature on missing line 408: `CKernel*
// CKernel::obtain_from_generic(CSGObject* kernel)` per the index): safe downcast
// of a generic CSGObject to CKernel. NULL input yields NULL; a non-CKernel
// object triggers a REQUIRE failure with the offending class name.
409 {
410  if (kernel)
411  {
412  CKernel* casted=dynamic_cast<CKernel*>(kernel);
413  REQUIRE(casted, "CKernel::obtain_from_generic(): Error, provided object"
414  " of class \"%s\" is not a subclass of CKernel!\n",
415  kernel->get_name());
416  return casted;
417  }
418  else
419  return NULL;
420 }
421 
// init_optimization_svm (signature on missing line 422): gathers the SVM's
// support vector indices and alphas into temporary arrays, forwards them to
// init_optimization(), and frees the temporaries before returning its result.
423 {
424  int32_t num_suppvec=svm->get_num_support_vectors();
425  int32_t* sv_idx=SG_MALLOC(int32_t, num_suppvec);
426  float64_t* sv_weight=SG_MALLOC(float64_t, num_suppvec);
427 
428  for (int32_t i=0; i<num_suppvec; i++)
429  {
430  sv_idx[i] = svm->get_support_vector(i);
431  sv_weight[i] = svm->get_alpha(i);
432  }
433  bool ret = init_optimization(num_suppvec, sv_idx, sv_weight);
434 
435  SG_FREE(sv_idx);
436  SG_FREE(sv_weight);
437  return ret;
438 }
439 
// Serialization hooks. lhs_equals_rhs is persisted but the rhs alias itself is
// not serialized twice: before saving the alias is temporarily set to NULL, and
// after loading/saving it is restored to point at lhs. Signature lines (440,
// 447, 455) and the base-class CSGObject:: calls (442, 449, 457) were lost in
// extraction -- see the index entries for load_serializable_post /
// save_serializable_pre / save_serializable_post.
// load_serializable_post: re-establish the rhs==lhs alias after loading.
441 {
443  if (lhs_equals_rhs)
444  rhs=lhs;
445 }
446 
// save_serializable_pre: break the alias so the shared object is stored once.
448 {
450 
451  if (lhs_equals_rhs)
452  rhs=NULL;
453 }
454 
// save_serializable_post: restore the alias after saving.
456 {
458 
459  if (lhs_equals_rhs)
460  rhs=lhs;
461 }
462 
// register_params (the opening signature/brace, original line 463, was lost in
// extraction): registers all serializable members of CKernel with the parameter
// framework. Only combined_kernel_weight and normalizer participate in model
// selection (MS_AVAILABLE); everything else is bookkeeping state.
// NOTE(review): original lines 473 and 475 (the MS_* argument lines of the
// num_lhs/num_rhs SG_ADDs) are missing from this listing.
464  SG_ADD(&cache_size, "cache_size",
465  "Cache size in MB.", MS_NOT_AVAILABLE);
466  SG_ADD((CSGObject**) &lhs, "lhs",
467  "Feature vectors to occur on left hand side.", MS_NOT_AVAILABLE);
468  SG_ADD((CSGObject**) &rhs, "rhs",
469  "Feature vectors to occur on right hand side.", MS_NOT_AVAILABLE);
470  SG_ADD(&lhs_equals_rhs, "lhs_equals_rhs",
471  "If features on lhs are the same as on rhs.", MS_NOT_AVAILABLE);
472  SG_ADD(&num_lhs, "num_lhs", "Number of feature vectors on left hand side.",
474  SG_ADD(&num_rhs, "num_rhs", "Number of feature vectors on right hand side.",
476  SG_ADD(&combined_kernel_weight, "combined_kernel_weight",
477  "Combined kernel weight.", MS_AVAILABLE);
478  SG_ADD(&optimization_initialized, "optimization_initialized",
479  "Optimization is initialized.", MS_NOT_AVAILABLE);
480  SG_ADD((machine_int_t*) &opt_type, "opt_type",
481  "Optimization type.", MS_NOT_AVAILABLE);
482  SG_ADD(&properties, "properties", "Kernel properties.", MS_NOT_AVAILABLE);
483  SG_ADD((CSGObject**) &normalizer, "normalizer", "Normalize the kernel.",
484  MS_AVAILABLE);
485 }
486 
487 
// Private init(): sets every member to its default (empty) state; shared by all
// constructors. NOTE(review): original lines 497-500 and 505 are missing from
// this listing -- they presumably initialized the remaining members
// (combined_kernel_weight, optimization_initialized, opt_type, properties) and
// called register_params(); confirm against the original Kernel.cpp.
488 void CKernel::init()
489 {
490  cache_size=10;
491  kernel_matrix=NULL;
492  lhs=NULL;
493  rhs=NULL;
494  num_lhs=0;
495  num_rhs=0;
496  lhs_equals_rhs=false;
501  normalizer=NULL;
502 
503 
504 
506 }
507 
// Parameter bundle handed to the per-thread kernel-matrix worker
// get_kernel_matrix_helper(). NOTE(review): lines 513-515 of the original are
// missing here -- given that the helper reads params->kernel, the struct must
// also declare a `CKernel* kernel;` member that was lost in extraction.
508 namespace shogun
509 {
511 template <class T> struct K_THREAD_PARAM
512 {
// first row (inclusive) this worker computes
516  int32_t start;
// one past the last row this worker computes
518  int32_t end;
// global entry counter value at which this worker starts (for progress)
520  int64_t total_start;
// global entry counter value at which this worker ends (for progress)
522  int64_t total_end;
// number of rows of the full kernel matrix
524  int32_t m;
// number of columns of the full kernel matrix
526  int32_t n;
// column-major output buffer of size m*n
528  T* result;
// if true, assume k(i,j)==k(j,i) and mirror each computed value
530  bool symmetric;
// if true, emit progress output while computing
532  bool verbose;
533 };
534 }
535 
537  bool no_diag)
538 {
539  SG_DEBUG("Entering\n");
540 
541  REQUIRE(has_features(), "No features assigned to kernel\n")
542  REQUIRE(lhs_equals_rhs, "The kernel matrix is not symmetric!\n")
543  REQUIRE(block_begin>=0 && block_begin<num_rhs,
544  "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
545  REQUIRE(block_begin+block_size<=num_rhs,
546  "Invalid block size (%d) at starting index (%d, %d)! "
547  "Please use smaller blocks!", block_size, block_begin, block_begin)
548  REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)
549 
550  float64_t sum=0.0;
551 
552  // since the block is symmetric with main diagonal inside, we can save half
553  // the computation with using only the upper triangular part.
554  // this can be done in parallel
555  #pragma omp parallel for reduction(+:sum)
556  for (index_t i=0; i<block_size; ++i)
557  {
558  // compute the kernel values on the upper triangular part of the kernel
559  // matrix and compute sum on the fly
560  for (index_t j=i+1; j<block_size; ++j)
561  {
562  float64_t k=kernel(i+block_begin, j+block_begin);
563  sum+=k;
564  }
565  }
566 
567  // the actual sum would be twice of what we computed
568  sum*=2;
569 
570  // add the diagonal elements if required - keeping this check
571  // outside of the loop to save cycles
572  if (!no_diag)
573  {
574  #pragma omp parallel for reduction(+:sum)
575  for (index_t i=0; i<block_size; ++i)
576  {
577  float64_t diag=kernel(i+block_begin, i+block_begin);
578  sum+=diag;
579  }
580  }
581 
582  SG_DEBUG("Leaving\n");
583 
584  return sum;
585 }
586 
587 float64_t CKernel::sum_block(index_t block_begin_row, index_t block_begin_col,
588  index_t block_size_row, index_t block_size_col, bool no_diag)
589 {
590  SG_DEBUG("Entering\n");
591 
592  REQUIRE(has_features(), "No features assigned to kernel\n")
593  REQUIRE(block_begin_row>=0 && block_begin_row<num_lhs &&
594  block_begin_col>=0 && block_begin_col<num_rhs,
595  "Invalid block begin index (%d, %d)!\n",
596  block_begin_row, block_begin_col)
597  REQUIRE(block_begin_row+block_size_row<=num_lhs &&
598  block_begin_col+block_size_col<=num_rhs,
599  "Invalid block size (%d, %d) at starting index (%d, %d)! "
600  "Please use smaller blocks!", block_size_row, block_size_col,
601  block_begin_row, block_begin_col)
602  REQUIRE(block_size_row>=1 && block_size_col>=1,
603  "Invalid block size (%d, %d)!\n", block_size_row, block_size_col)
604 
605  // check if removal of diagonal is required/valid
606  if (no_diag && block_size_row!=block_size_col)
607  {
608  SG_WARNING("Not removing the main diagonal since block is not square!\n");
609  no_diag=false;
610  }
611 
612  float64_t sum=0.0;
613 
614  // this can be done in parallel for the rows/cols
615  #pragma omp parallel for reduction(+:sum)
616  for (index_t i=0; i<block_size_row; ++i)
617  {
618  // compute the kernel values and compute sum on the fly
619  for (index_t j=0; j<block_size_col; ++j)
620  {
621  float64_t k=no_diag && i==j ? 0 :
622  kernel(i+block_begin_row, j+block_begin_col);
623  sum+=k;
624  }
625  }
626 
627  SG_DEBUG("Leaving\n");
628 
629  return sum;
630 }
631 
633  index_t block_size, bool no_diag)
634 {
635  SG_DEBUG("Entering\n");
636 
637  REQUIRE(has_features(), "No features assigned to kernel\n")
638  REQUIRE(lhs_equals_rhs, "The kernel matrix is not symmetric!\n")
639  REQUIRE(block_begin>=0 && block_begin<num_rhs,
640  "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
641  REQUIRE(block_begin+block_size<=num_rhs,
642  "Invalid block size (%d) at starting index (%d, %d)! "
643  "Please use smaller blocks!", block_size, block_begin, block_begin)
644  REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)
645 
646  // initialize the vector that accumulates the row/col-wise sum on the go
647  SGVector<float64_t> row_sum(block_size);
648  row_sum.set_const(0.0);
649 
650  // since the block is symmetric with main diagonal inside, we can save half
651  // the computation with using only the upper triangular part.
652  // this can be done in parallel for the rows/cols
653  #pragma omp parallel for
654  for (index_t i=0; i<block_size; ++i)
655  {
656  // compute the kernel values on the upper triangular part of the kernel
657  // matrix and compute row-wise sum on the fly
658  for (index_t j=i+1; j<block_size; ++j)
659  {
660  float64_t k=kernel(i+block_begin, j+block_begin);
661  #pragma omp critical
662  {
663  row_sum[i]+=k;
664  row_sum[j]+=k;
665  }
666  }
667  }
668 
669  // add the diagonal elements if required - keeping this check
670  // outside of the loop to save cycles
671  if (!no_diag)
672  {
673  #pragma omp parallel for
674  for (index_t i=0; i<block_size; ++i)
675  {
676  float64_t diag=kernel(i+block_begin, i+block_begin);
677  row_sum[i]+=diag;
678  }
679  }
680 
681  SG_DEBUG("Leaving\n");
682 
683  return row_sum;
684 }
685 
687  block_begin, index_t block_size, bool no_diag)
688 {
689  SG_DEBUG("Entering\n");
690 
691  REQUIRE(has_features(), "No features assigned to kernel\n")
692  REQUIRE(lhs_equals_rhs, "The kernel matrix is not symmetric!\n")
693  REQUIRE(block_begin>=0 && block_begin<num_rhs,
694  "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
695  REQUIRE(block_begin+block_size<=num_rhs,
696  "Invalid block size (%d) at starting index (%d, %d)! "
697  "Please use smaller blocks!", block_size, block_begin, block_begin)
698  REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)
699 
700  // initialize the matrix that accumulates the row/col-wise sum on the go
701  // the first column stores the sum of kernel values
702  // the second column stores the sum of squared kernel values
703  SGMatrix<float64_t> row_sum(block_size, 2);
704  row_sum.set_const(0.0);
705 
706  // since the block is symmetric with main diagonal inside, we can save half
707  // the computation with using only the upper triangular part
708  // this can be done in parallel for the rows/cols
709 #pragma omp parallel for
710  for (index_t i=0; i<block_size; ++i)
711  {
712  // compute the kernel values on the upper triangular part of the kernel
713  // matrix and compute row-wise sum and squared sum on the fly
714  for (index_t j=i+1; j<block_size; ++j)
715  {
716  float64_t k=kernel(i+block_begin, j+block_begin);
717 #pragma omp critical
718  {
719  row_sum(i, 0)+=k;
720  row_sum(j, 0)+=k;
721  row_sum(i, 1)+=k*k;
722  row_sum(j, 1)+=k*k;
723  }
724  }
725  }
726 
727  // add the diagonal elements if required - keeping this check
728  // outside of the loop to save cycles
729  if (!no_diag)
730  {
731 #pragma omp parallel for
732  for (index_t i=0; i<block_size; ++i)
733  {
734  float64_t diag=kernel(i+block_begin, i+block_begin);
735  row_sum(i, 0)+=diag;
736  row_sum(i, 1)+=diag*diag;
737  }
738  }
739 
740  SG_DEBUG("Leaving\n");
741 
742  return row_sum;
743 }
744 
746  index_t block_begin_col, index_t block_size_row,
747  index_t block_size_col, bool no_diag)
748 {
749  SG_DEBUG("Entering\n");
750 
751  REQUIRE(has_features(), "No features assigned to kernel\n")
752  REQUIRE(block_begin_row>=0 && block_begin_row<num_lhs &&
753  block_begin_col>=0 && block_begin_col<num_rhs,
754  "Invalid block begin index (%d, %d)!\n",
755  block_begin_row, block_begin_col)
756  REQUIRE(block_begin_row+block_size_row<=num_lhs &&
757  block_begin_col+block_size_col<=num_rhs,
758  "Invalid block size (%d, %d) at starting index (%d, %d)! "
759  "Please use smaller blocks!", block_size_row, block_size_col,
760  block_begin_row, block_begin_col)
761  REQUIRE(block_size_row>=1 && block_size_col>=1,
762  "Invalid block size (%d, %d)!\n", block_size_row, block_size_col)
763 
764  // check if removal of diagonal is required/valid
765  if (no_diag && block_size_row!=block_size_col)
766  {
767  SG_WARNING("Not removing the main diagonal since block is not square!\n");
768  no_diag=false;
769  }
770 
771  // initialize the vector that accumulates the row/col-wise sum on the go
772  // the first block_size_row entries store the row-wise sum of kernel values
773  // the nextt block_size_col entries store the col-wise sum of kernel values
774  SGVector<float64_t> sum(block_size_row+block_size_col);
775  sum.set_const(0.0);
776 
777  // this can be done in parallel for the rows/cols
778 #pragma omp parallel for
779  for (index_t i=0; i<block_size_row; ++i)
780  {
781  // compute the kernel values and compute sum on the fly
782  for (index_t j=0; j<block_size_col; ++j)
783  {
784  float64_t k=no_diag && i==j ? 0 :
785  kernel(i+block_begin_row, j+block_begin_col);
786 #pragma omp critical
787  {
788  sum[i]+=k;
789  sum[j+block_size_row]+=k;
790  }
791  }
792  }
793 
794  SG_DEBUG("Leaving\n");
795 
796  return sum;
797 }
798 
// Per-thread worker for get_kernel_matrix(): fills rows [start, end) of the
// column-major result buffer with kernel values; when symmetric, the lower
// triangle is mirrored from the computed upper-triangle values.
// NOTE(review): original line 839 is missing between the progress report and
// the `break` -- the break at line 840 must be guarded by a lost condition
// (per the index, likely `if (CSignal::cancel_computations())`); as printed,
// the break would fire unconditionally in verbose mode. Confirm before editing.
799 template <class T> void* CKernel::get_kernel_matrix_helper(void* p)
800 {
801  K_THREAD_PARAM<T>* params= (K_THREAD_PARAM<T>*) p;
802  int32_t i_start=params->start;
803  int32_t i_end=params->end;
804  CKernel* k=params->kernel;
805  T* result=params->result;
806  bool symmetric=params->symmetric;
807  int32_t n=params->n;
808  int32_t m=params->m;
809  bool verbose=params->verbose;
810  int64_t total_start=params->total_start;
811  int64_t total_end=params->total_end;
812  int64_t total=total_start;
813 
814  for (int32_t i=i_start; i<i_end; i++)
815  {
// start at the diagonal for symmetric matrices: lower triangle is mirrored
816  int32_t j_start=0;
817 
818  if (symmetric)
819  j_start=i;
820 
821  for (int32_t j=j_start; j<n; j++)
822  {
823  float64_t v=k->kernel(i,j);
// result is stored column-major: entry (i,j) lives at i + j*m
824  result[i+j*m]=v;
825 
826  if (symmetric && i!=j)
827  result[j+i*m]=v;
828 
829  if (verbose)
830  {
831  total++;
832 
833  if (symmetric && i!=j)
834  total++;
835 
836  if (total%100 == 0)
837  SG_OBJ_PROGRESS(k, total, total_start, total_end)
838 
840  break;
841  }
842  }
843 
844  }
845 
846  return NULL;
847 }
848 
849 template <class T>
851 {
852  T* result = NULL;
853 
854  REQUIRE(has_features(), "no features assigned to kernel\n")
855 
856  int32_t m=get_num_vec_lhs();
857  int32_t n=get_num_vec_rhs();
858 
859  int64_t total_num = int64_t(m)*n;
860 
861  // if lhs == rhs and sizes match assume k(i,j)=k(j,i)
862  bool symmetric= (lhs && lhs==rhs && m==n);
863 
864  SG_DEBUG("returning kernel matrix of size %dx%d\n", m, n)
865 
866  result=SG_MALLOC(T, total_num);
867 
868  int32_t num_threads=parallel->get_num_threads();
869  K_THREAD_PARAM<T> params;
870  int64_t step = total_num/num_threads;
871  index_t t = 0;
872  #pragma omp parallel for lastprivate(t) private(params)
873  for (t = 0; t < num_threads; ++t)
874  {
875  params.kernel = this;
876  params.result = result;
877  params.start = compute_row_start(t*step, n, symmetric);
878  params.end = compute_row_start((t+1)*step, n, symmetric);
879  params.total_start=t*step;
880  params.total_end=(t+1)*step;
881  params.n=n;
882  params.m=m;
883  params.symmetric=symmetric;
884  params.verbose=false;
885  CKernel::get_kernel_matrix_helper<T>((void*)&params);
886  }
887 
888  if (total_num % num_threads != 0)
889  {
890  params.kernel = this;
891  params.result = result;
892  params.start = compute_row_start(t*step, n, symmetric);
893  params.end = m;
894  params.total_start=t*step;
895  params.total_end=total_num;
896  params.n=n;
897  params.m=m;
898  params.symmetric=symmetric;
899  params.verbose=false;
900  CKernel::get_kernel_matrix_helper<T>((void*)&params);
901  }
902 
903  SG_DONE()
904 
905  return SGMatrix<T>(result,m,n,true);
906 }
907 
908 
// Explicit template instantiations for the two supported floating point
// element types, so the definitions above can live in this translation unit.
909 template SGMatrix<float64_t> CKernel::get_kernel_matrix<float64_t>();
910 template SGMatrix<float32_t> CKernel::get_kernel_matrix<float32_t>();
911 
912 template void* CKernel::get_kernel_matrix_helper<float64_t>(void* p);
913 template void* CKernel::get_kernel_matrix_helper<float32_t>(void* p);
914 
virtual void clear_normal()
Definition: Kernel.cpp:370
virtual const char * get_name() const =0
virtual void load_serializable_post()
Definition: Kernel.cpp:440
virtual bool init(CFeatures *lhs, CFeatures *rhs)
Definition: Kernel.cpp:81
int32_t compute_row_start(int64_t offs, int32_t n, bool symmetric)
#define SG_INFO(...)
Definition: SGIO.h:117
virtual void cleanup()
Definition: Kernel.cpp:156
#define SG_RESET_LOCALE
Definition: SGIO.h:85
#define SG_DONE()
Definition: SGIO.h:156
virtual void set_matrix(const bool *matrix, int32_t num_feat, int32_t num_vec)
Definition: File.cpp:126
virtual void compute_by_subkernel(int32_t vector_idx, float64_t *subkernel_contrib)
Definition: Kernel.cpp:380
int32_t index_t
Definition: common.h:72
virtual bool get_feature_class_compatibility(EFeatureClass rhs) const
Definition: Features.cpp:355
int32_t num_rhs
number of feature vectors on right hand side
int32_t get_num_threads() const
Definition: Parallel.cpp:97
static void * get_kernel_matrix_helper(void *p)
Definition: Kernel.cpp:799
Class ShogunException defines an exception which is thrown whenever an error inside of shogun occurs...
virtual bool set_normalizer(CKernelNormalizer *normalizer)
Definition: Kernel.cpp:133
virtual float64_t sum_block(index_t block_begin_row, index_t block_begin_col, index_t block_size_row, index_t block_size_col, bool no_diag=false)
Definition: Kernel.cpp:587
virtual int32_t get_num_vectors() const =0
virtual void save_serializable_pre()
Definition: SGObject.cpp:464
#define SG_ERROR(...)
Definition: SGIO.h:128
#define REQUIRE(x,...)
Definition: SGIO.h:205
virtual bool delete_optimization()
Definition: Kernel.cpp:346
float64_t kernel(int32_t idx_a, int32_t idx_b)
#define ENUM_CASE(n)
Definition: Kernel.cpp:217
uint64_t properties
Parallel * parallel
Definition: SGObject.h:561
virtual void remove_rhs()
takes all necessary steps if the rhs is removed from kernel
Definition: Kernel.cpp:206
virtual int32_t get_num_vec_lhs()
SGMatrix< float64_t > get_kernel_matrix()
#define SG_REF(x)
Definition: SGObject.h:52
#define SG_SET_LOCALE_C
Definition: SGIO.h:84
int32_t cache_size
cache_size in MB
bool get_is_initialized()
virtual SGMatrix< float64_t > row_wise_sum_squared_sum_symmetric_block(index_t block_begin, index_t block_size, bool no_diag=true)
Definition: Kernel.cpp:686
float64_t combined_kernel_weight
virtual void register_params()
Definition: Kernel.cpp:463
void save(CFile *writer)
Definition: Kernel.cpp:169
virtual void remove_lhs_and_rhs()
Definition: Kernel.cpp:177
virtual CKernelNormalizer * get_normalizer()
Definition: Kernel.cpp:145
#define ASSERT(x)
Definition: SGIO.h:200
Class SGObject is the base class of all shogun objects.
Definition: SGObject.h:125
virtual SGVector< float64_t > row_col_wise_sum_block(index_t block_begin_row, index_t block_begin_col, index_t block_size_row, index_t block_size_col, bool no_diag=false)
Definition: Kernel.cpp:745
#define SG_OBJ_PROGRESS(o,...)
Definition: SGIO.h:146
virtual float64_t sum_symmetric_block(index_t block_begin, index_t block_size, bool no_diag=true)
Definition: Kernel.cpp:536
virtual SGVector< float64_t > get_subkernel_weights()
Definition: Kernel.cpp:392
double float64_t
Definition: common.h:60
virtual EFeatureType get_feature_type()=0
KERNELCACHE_ELEM * kernel_matrix
A File access base class.
Definition: File.h:34
virtual void save_serializable_post()
Definition: Kernel.cpp:455
virtual float64_t compute_optimized(int32_t vector_idx)
Definition: Kernel.cpp:352
EOptimizationType get_optimization_type()
index_t num_rows
Definition: SGMatrix.h:463
virtual void save_serializable_post()
Definition: SGObject.cpp:469
void list_kernel()
Definition: Kernel.cpp:219
float64_t get_alpha(int32_t idx)
float64_t get_combined_kernel_weight()
virtual SGVector< float64_t > row_wise_sum_symmetric_block(index_t block_begin, index_t block_size, bool no_diag=true)
Definition: Kernel.cpp:632
virtual EFeatureClass get_feature_class() const =0
Identity Kernel Normalization, i.e. no normalization is applied.
index_t num_cols
Definition: SGMatrix.h:465
int32_t num_lhs
number of feature vectors on left hand side
The class Kernel Normalizer defines a function to post-process kernel values.
int32_t get_support_vector(int32_t idx)
static bool cancel_computations()
Definition: Signal.h:111
virtual int32_t get_num_vec_rhs()
virtual void set_subkernel_weights(SGVector< float64_t > weights)
Definition: Kernel.cpp:399
void set_const(T const_elem)
Definition: SGVector.cpp:184
virtual bool init_normalizer()
Definition: Kernel.cpp:151
bool optimization_initialized
EOptimizationType opt_type
void load(CFile *loader)
Definition: Kernel.cpp:163
virtual void load_serializable_post()
Definition: SGObject.cpp:459
CFeatures * rhs
feature vectors to occur on right hand side
static CKernel * obtain_from_generic(CSGObject *kernel)
Definition: Kernel.cpp:408
#define SG_UNREF(x)
Definition: SGObject.h:53
#define SG_DEBUG(...)
Definition: SGIO.h:106
all of classes and functions are contained in the shogun namespace
Definition: class_list.h:18
virtual bool init(CKernel *k)=0
virtual void compute_batch(int32_t num_vec, int32_t *vec_idx, float64_t *target, int32_t num_suppvec, int32_t *IDX, float64_t *alphas, float64_t factor=1.0)
Definition: Kernel.cpp:358
virtual bool support_compatible_class() const
Definition: Features.h:323
T sum(const Container< T > &a, bool no_diag=false)
bool lhs_equals_rhs
lhs
int machine_int_t
Definition: common.h:69
virtual EKernelType get_kernel_type()=0
virtual bool init_optimization(int32_t count, int32_t *IDX, float64_t *weights)
Definition: Kernel.cpp:339
CFeatures * lhs
feature vectors to occur on left hand side
The class Features is the base class of all feature objects.
Definition: Features.h:68
virtual void save_serializable_pre()
Definition: Kernel.cpp:447
virtual void remove_lhs()
Definition: Kernel.cpp:194
virtual int32_t get_num_subkernels()
Definition: Kernel.cpp:375
bool init_optimization_svm(CSVM *svm)
Definition: Kernel.cpp:422
A generic Support Vector Machine Interface.
Definition: SVM.h:49
The Kernel base class.
CKernelNormalizer * normalizer
void set_const(T const_elem)
Definition: SGMatrix.cpp:209
#define SG_WARNING(...)
Definition: SGIO.h:127
#define SG_ADD(...)
Definition: SGObject.h:94
virtual bool has_features()
virtual ~CKernel()
Definition: Kernel.cpp:68
virtual void add_to_normal(int32_t vector_idx, float64_t weight)
Definition: Kernel.cpp:365
virtual EFeatureType get_feature_type() const =0
index_t vlen
Definition: SGVector.h:545
virtual EFeatureClass get_feature_class()=0

SHOGUN Machine Learning Toolbox - Documentation