#include <shogun/lib/config.h>

// CKernel::~CKernel(): warn if the kernel is still initialized, then report deletion
if (get_is_initialized())
    SG_ERROR("Kernel still initialized on destruction.\n")

SG_INFO("Kernel deleted (%p).\n", this)
// CKernel::init(l, r): both feature sets must be supplied and compatible
REQUIRE(l, "CKernel::init(%p, %p): Left hand side features required!\n", l, r)
REQUIRE(r, "CKernel::init(%p, %p): Right hand side features required!\n", l, r)

// the same message is used by both branches of the feature-class compatibility check
REQUIRE(l->get_feature_class_compatibility(r->get_feature_class()),
    "Right hand side of features (%s) must be compatible with left hand side features (%s)\n",
    r->get_name(), l->get_name())

SG_DEBUG("leaving CKernel::init(%p, %p)\n", l, r)
SG_DEBUG("entering CKernel::remove_lhs_and_rhs\n")
...
SG_DEBUG("leaving CKernel::remove_lhs_and_rhs\n")
// CKernel::list_kernel(): helper macro that prints an enum value by name
#define ENUM_CASE(n) case n: SG_INFO(#n " ") break;
... "SLOWBUTMEMEFFICIENT");
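The macro stringizes the enum identifier so it can be printed as a label. An illustrative (not verbatim) application to EOptimizationType:

// Sketch: ENUM_CASE(FASTBUTMEMHUNGRY) expands to
//   case FASTBUTMEMHUNGRY: SG_INFO("FASTBUTMEMHUNGRY" " ") break;
switch (get_optimization_type())
{
    ENUM_CASE(FASTBUTMEMHUNGRY)
    ENUM_CASE(SLOWBUTMEMEFFICIENT)
}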
// Default implementations in CKernel: plain kernels support neither linadd
// optimization, batch computation, nor subkernels; subclasses override these.
bool CKernel::init_optimization(int32_t count, int32_t* IDX, float64_t* weights)
{
    SG_ERROR("kernel does not support linadd optimization\n")
    return false;
}

bool CKernel::delete_optimization()
{
    SG_ERROR("kernel does not support linadd optimization\n")
    return false;
}

float64_t CKernel::compute_optimized(int32_t vector_idx)
{
    SG_ERROR("kernel does not support linadd optimization\n")
    return 0;
}

void CKernel::compute_batch(int32_t num_vec, int32_t* vec_idx, float64_t* target,
    int32_t num_suppvec, int32_t* IDX, float64_t* alphas, float64_t factor)
{
    SG_ERROR("kernel does not support batch computation\n")
}

void CKernel::add_to_normal(int32_t vector_idx, float64_t weight)
{
    SG_ERROR("kernel does not support linadd optimization, add_to_normal not implemented\n")
}

void CKernel::clear_normal()
{
    SG_ERROR("kernel does not support linadd optimization, clear_normal not implemented\n")
}

void CKernel::compute_by_subkernel(int32_t vector_idx, float64_t* subkernel_contrib)
{
    SG_ERROR("kernel compute_by_subkernel not implemented\n")
}
// CKernel::set_subkernel_weights(): a plain kernel has exactly one weight
if (weights.vlen!=1)
    SG_ERROR("number of subkernel weights should be one ...\n")

// CKernel::obtain_from_generic(): checked downcast from CSGObject to CKernel
CKernel* casted=dynamic_cast<CKernel*>(kernel);
REQUIRE(casted, "CKernel::obtain_from_generic(): Error, provided object"
    " of class \"%s\" is not a subclass of CKernel!\n", kernel->get_name());
// CKernel::init_optimization_svm(): gather the SVM's support vector indices
int32_t* sv_idx=SG_MALLOC(int32_t, num_suppvec);
for (int32_t i=0; i<num_suppvec; i++)
    sv_idx[i]=svm->get_support_vector(i);
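A sketch of the intended call pattern: after training an SVM, hand its support vectors and alphas to the kernel so the linadd path can be used. CLibSVM is assumed here; kernel and labels are assumed to come from the surrounding program, as in the earlier sketch:

#include <shogun/classifier/svm/LibSVM.h>

CSVM* svm=new CLibSVM(1.0 /* C */, kernel, labels);
svm->train();
kernel->init_optimization_svm(svm);               // internally calls init_optimization()
float64_t out=kernel->compute_optimized(0);       // score of vector 0 on the rhs
SG_UNREF(svm);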
// CKernel::register_params(): expose members for serialization/model selection
// (the final SG_ADD argument is reconstructed; it is not part of the fragment)
SG_ADD(&num_lhs, "num_lhs", "Number of feature vectors on left hand side.",
    MS_NOT_AVAILABLE);
SG_ADD(&num_rhs, "num_rhs", "Number of feature vectors on right hand side.",
    MS_NOT_AVAILABLE);
544 "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
546 "Invalid block size (%d) at starting index (%d, %d)! " 547 "Please use smaller blocks!", block_size, block_begin, block_begin)
548 REQUIRE(block_size>=1,
"Invalid block size (%d)!\n", block_size)
555 #pragma omp parallel for reduction(+:sum) 556 for (
index_t i=0; i<block_size; ++i)
560 for (
index_t j=i+1; j<block_size; ++j)
574 #pragma omp parallel for reduction(+:sum) 575 for (
index_t i=0; i<block_size; ++i)
// CKernel::sum_block(block_begin_row, block_begin_col, block_size_row, block_size_col, no_diag)
REQUIRE(block_begin_row>=0 && block_begin_row<num_lhs &&
    block_begin_col>=0 && block_begin_col<num_rhs,
    "Invalid block begin index (%d, %d)!\n",
    block_begin_row, block_begin_col)
REQUIRE(block_begin_row+block_size_row<=num_lhs &&
    block_begin_col+block_size_col<=num_rhs,
    "Invalid block size (%d, %d) at starting index (%d, %d)! "
    "Please use smaller blocks!", block_size_row, block_size_col,
    block_begin_row, block_begin_col)
REQUIRE(block_size_row>=1 && block_size_col>=1,
    "Invalid block size (%d, %d)!\n", block_size_row, block_size_col)

// the diagonal can only be removed from square blocks
if (no_diag && block_size_row!=block_size_col)
{
    SG_WARNING("Not removing the main diagonal since block is not square!\n");
    no_diag=false;
}

#pragma omp parallel for reduction(+:sum)
for (index_t i=0; i<block_size_row; ++i)
{
    for (index_t j=0; j<block_size_col; ++j)
        sum+=kernel(i+block_begin_row, j+block_begin_col);
}
// (diagonal entries are compensated for afterwards when no_diag is set)
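These block sums are the building blocks for streaming statistics (e.g. two-sample test estimators); a minimal usage sketch with illustrative block coordinates:

float64_t within =kernel->sum_symmetric_block(0, 50, true /* exclude diagonal */);
float64_t between=kernel->sum_block(0, 50, 50, 30);   // rows 0..49, cols 50..79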
SGVector<float64_t> CKernel::row_wise_sum_symmetric_block(index_t block_begin,
    index_t block_size, bool no_diag)
{
    REQUIRE(block_begin>=0 && block_begin<num_rhs,
        "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
    REQUIRE(block_begin+block_size<=num_rhs,
        "Invalid block size (%d) at starting index (%d, %d)! "
        "Please use smaller blocks!", block_size, block_begin, block_begin)
    REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)

    // upper triangle: each off-diagonal value contributes to the sums of rows i and j
    #pragma omp parallel for
    for (index_t i=0; i<block_size; ++i)
    {
        for (index_t j=i+1; j<block_size; ++j)
            ...
    }

    // diagonal entries are added in a second pass unless no_diag is set
    #pragma omp parallel for
    for (index_t i=0; i<block_size; ++i)
        ...
}
SGMatrix<float64_t> CKernel::row_wise_sum_squared_sum_symmetric_block(
    index_t block_begin, index_t block_size, bool no_diag)
{
    // column 0 of the result holds row-wise sums, column 1 row-wise squared sums
    REQUIRE(block_begin>=0 && block_begin<num_rhs,
        "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
    REQUIRE(block_begin+block_size<=num_rhs,
        "Invalid block size (%d) at starting index (%d, %d)! "
        "Please use smaller blocks!", block_size, block_begin, block_begin)
    REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)

    #pragma omp parallel for
    for (index_t i=0; i<block_size; ++i)
    {
        for (index_t j=i+1; j<block_size; ++j)
            ...
    }

    // diagonal contribution (skipped when no_diag is set)
    #pragma omp parallel for
    for (index_t i=0; i<block_size; ++i)
    {
        float64_t diag=kernel(i+block_begin, i+block_begin);
        row_sum(i, 0)+=diag;
        row_sum(i, 1)+=diag*diag;
    }
    ...
}
SGVector<float64_t> CKernel::row_col_wise_sum_block(index_t block_begin_row,
    index_t block_begin_col, index_t block_size_row, index_t block_size_col,
    bool no_diag)
{
    // the first block_size_row entries of the result hold row sums,
    // the remaining block_size_col entries hold column sums
    REQUIRE(block_begin_row>=0 && block_begin_row<num_lhs &&
        block_begin_col>=0 && block_begin_col<num_rhs,
        "Invalid block begin index (%d, %d)!\n",
        block_begin_row, block_begin_col)
    REQUIRE(block_begin_row+block_size_row<=num_lhs &&
        block_begin_col+block_size_col<=num_rhs,
        "Invalid block size (%d, %d) at starting index (%d, %d)! "
        "Please use smaller blocks!", block_size_row, block_size_col,
        block_begin_row, block_begin_col)
    REQUIRE(block_size_row>=1 && block_size_col>=1,
        "Invalid block size (%d, %d)!\n", block_size_row, block_size_col)

    // the diagonal can only be removed from square blocks
    if (no_diag && block_size_row!=block_size_col)
    {
        SG_WARNING("Not removing the main diagonal since block is not square!\n");
        no_diag=false;
    }

    #pragma omp parallel for
    for (index_t i=0; i<block_size_row; ++i)
    {
        for (index_t j=0; j<block_size_col; ++j)
        {
            float64_t k=kernel(i+block_begin_row, j+block_begin_col);
            sum[i]+=k;                    // row-wise sum
            sum[j+block_size_row]+=k;     // column-wise sum
        }
    }
    ...
}
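A sketch of the row-wise and row/column-wise variants (illustrative indices):

SGVector<float64_t> rows=kernel->row_wise_sum_symmetric_block(0, 50);   // 50 row sums
SGVector<float64_t> both=kernel->row_col_wise_sum_block(0, 50, 50, 30);
// both[0..49] hold the 50 row-wise sums, both[50..79] the 30 column-wise sums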
// CKernel::get_kernel_matrix_helper<T>(): one worker fills rows [start, end);
// k, result, m, n, j_start and symmetric are unpacked from params
int32_t i_start=params->start;
int32_t i_end=params->end;
int64_t total=total_start;
for (int32_t i=i_start; i<i_end; i++)
{
    for (int32_t j=j_start; j<n; j++)
    {
        float64_t v=k->kernel(i, j);
        result[i+j*m]=v;
        if (symmetric && i!=j)
            result[j+i*m]=v;    // mirror the value across the diagonal

        total++;                // progress counter
        if (symmetric && i!=j)
            total++;            // the mirrored entry counts as well
    }
}
// CKernel::get_kernel_matrix<T>(): distribute the matrix entries over OpenMP threads
int64_t total_num=int64_t(m)*n;
bool symmetric=(lhs && lhs==rhs && m==n);

SG_DEBUG("returning kernel matrix of size %dx%d\n", m, n)

result=SG_MALLOC(T, total_num);
int64_t step=total_num/num_threads;

#pragma omp parallel for lastprivate(t) private(params)
for (t=0; t<num_threads; ++t)
{
    // each thread computes its contiguous share of entries (range stored in params)
    CKernel::get_kernel_matrix_helper<T>((void*)&params);
}

// entries left over by the integer division are computed by the calling thread
if (total_num % num_threads != 0)
{
    CKernel::get_kernel_matrix_helper<T>((void*)&params);
}
// explicit instantiations of the helper for double and single precision
template void* CKernel::get_kernel_matrix_helper<float64_t>(void* p);
template void* CKernel::get_kernel_matrix_helper<float32_t>(void* p);
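Both instantiations back the templated getter; a brief sketch of requesting the matrix in either precision (single precision halves the memory of the copied matrix):

SGMatrix<float64_t> K64=kernel->get_kernel_matrix();
SGMatrix<float32_t> K32=kernel->get_kernel_matrix<float32_t>();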
Symbols referenced by Kernel.cpp:

virtual void clear_normal()
virtual const char* get_name() const =0
virtual void load_serializable_post()
virtual bool init(CFeatures* lhs, CFeatures* rhs)
int32_t get_num_support_vectors()
int32_t compute_row_start(int64_t offs, int32_t n, bool symmetric)
virtual void set_matrix(const bool* matrix, int32_t num_feat, int32_t num_vec)
virtual void compute_by_subkernel(int32_t vector_idx, float64_t* subkernel_contrib)
virtual bool get_feature_class_compatibility(EFeatureClass rhs) const
int32_t num_rhs - number of feature vectors on right hand side
int32_t get_num_threads() const
static void* get_kernel_matrix_helper(void* p)
ShogunException - exception thrown whenever an error occurs inside shogun
virtual bool set_normalizer(CKernelNormalizer* normalizer)
virtual float64_t sum_block(index_t block_begin_row, index_t block_begin_col, index_t block_size_row, index_t block_size_col, bool no_diag=false)
virtual int32_t get_num_vectors() const =0
virtual void save_serializable_pre()
virtual bool delete_optimization()
float64_t kernel(int32_t idx_a, int32_t idx_b)
virtual void remove_rhs() - takes all necessary steps if the rhs is removed from the kernel
virtual int32_t get_num_vec_lhs()
SGMatrix<float64_t> get_kernel_matrix()
int32_t cache_size - cache size in MB
bool get_is_initialized()
virtual SGMatrix<float64_t> row_wise_sum_squared_sum_symmetric_block(index_t block_begin, index_t block_size, bool no_diag=true)
float64_t combined_kernel_weight
virtual void register_params()
virtual void remove_lhs_and_rhs()
virtual CKernelNormalizer* get_normalizer()
CSGObject - base class of all shogun objects
virtual SGVector<float64_t> row_col_wise_sum_block(index_t block_begin_row, index_t block_begin_col, index_t block_size_row, index_t block_size_col, bool no_diag=false)
#define SG_OBJ_PROGRESS(o,...)
virtual float64_t sum_symmetric_block(index_t block_begin, index_t block_size, bool no_diag=true)
virtual SGVector<float64_t> get_subkernel_weights()
virtual EFeatureType get_feature_type()=0
KERNELCACHE_ELEM* kernel_matrix
CFile - a file access base class
virtual void save_serializable_post()
virtual float64_t compute_optimized(int32_t vector_idx)
EOptimizationType get_optimization_type()
float64_t get_alpha(int32_t idx)
float64_t get_combined_kernel_weight()
virtual SGVector<float64_t> row_wise_sum_symmetric_block(index_t block_begin, index_t block_size, bool no_diag=true)
virtual EFeatureClass get_feature_class() const =0
CIdentityKernelNormalizer - identity kernel normalization, i.e. no normalization is applied
int32_t num_lhs - number of feature vectors on left hand side
CKernelNormalizer - defines a function to post-process kernel values
int32_t get_support_vector(int32_t idx)
static bool cancel_computations()
virtual int32_t get_num_vec_rhs()
virtual void set_subkernel_weights(SGVector<float64_t> weights)
void set_const(T const_elem)
virtual bool init_normalizer()
bool optimization_initialized
EOptimizationType opt_type
CFeatures* rhs - feature vectors to occur on right hand side
static CKernel* obtain_from_generic(CSGObject* kernel)
shogun - all classes and functions are contained in the shogun namespace
virtual bool init(CKernel* k)=0
virtual void compute_batch(int32_t num_vec, int32_t* vec_idx, float64_t* target, int32_t num_suppvec, int32_t* IDX, float64_t* alphas, float64_t factor=1.0)
virtual bool support_compatible_class() const
T sum(const Container<T>& a, bool no_diag=false)
virtual EKernelType get_kernel_type()=0
virtual bool init_optimization(int32_t count, int32_t* IDX, float64_t* weights)
CFeatures* lhs - feature vectors to occur on left hand side
CFeatures - base class of all feature objects
virtual void remove_lhs()
virtual int32_t get_num_subkernels()
bool init_optimization_svm(CSVM* svm)
CSVM - a generic Support Vector Machine interface
CKernelNormalizer* normalizer
virtual bool has_features()
virtual void add_to_normal(int32_t vector_idx, float64_t weight)
virtual EFeatureType get_feature_type() const =0
virtual EFeatureClass get_feature_class()=0