26 using namespace Eigen;
29 :
CKernel(size), append_subkernel_weights(asw)
34 SG_INFO(
"(subkernel weights are appended)\n")
36 SG_INFO(
"Combined kernel created (%p)\n",
this)
47 SG_INFO(
"Combined kernel deleted (%p).\n",
this)
60 float64_t max_coeff=eigen_log_wt.maxCoeff();
61 VectorXd tmp = eigen_log_wt.array() - max_coeff;
63 eigen_wt = tmp.array() -
sum;
64 eigen_wt = eigen_wt.array().exp();
81 SG_DEBUG(
"Initialising combined kernel's combined features with the " 82 "same instance from parameters\n");
93 return init(combined_l, combined_r);
98 " of class %s but need to be combined features!\n",
101 " of class %s but need to be combined features!\n",
113 SG_DEBUG(
"Starting for loop for kernels\n")
119 SG_ERROR(
"Kernel at position %d is NULL\n", k_idx);
132 SG_ERROR(
"CombinedKernel: Number of features/kernels does not match - bailing out\n")
136 result=k->
init(lf,rf);
145 SG_DEBUG(
"Initializing 0x%p - \"%s\" (skipping init, this is a CUSTOM kernel)\n",
this, k->
get_name())
147 SG_ERROR(
"No kernel matrix was assigned to this Custom kernel\n")
159 SG_INFO(
"CombinedKernel: Initialising the following kernel failed\n")
172 SG_ERROR(
"CombinedKernel: Number of features/kernels does not match - bailing out\n")
251 SG_INFO(
"BEGIN COMBINED KERNEL LIST - ")
260 SG_INFO(
"END COMBINED KERNEL LIST - ")
278 int32_t count, int32_t *IDX,
float64_t *weights)
280 SG_DEBUG(
"initializing CCombinedKernel optimization\n")
284 bool have_non_optimizable=
false;
296 SG_WARNING(
"non-optimizable kernel 0x%X in kernel-list\n", k)
297 have_non_optimizable=
true;
302 have_non_optimizable=
true;
303 SG_WARNING(
"init_optimization of kernel 0x%X failed\n", k)
309 if (have_non_optimizable)
311 SG_WARNING(
"some kernels in the kernel-list are not optimized\n")
313 sv_idx=SG_MALLOC(int32_t, count);
316 for (int32_t i=0; i<count; i++)
351 int32_t num_vec, int32_t* vec_idx,
float64_t* result, int32_t num_suppvec,
383 int32_t num_suppvec, int32_t* IDX,
float64_t* weights)
394 #pragma omp parallel for 395 for (int32_t i=0; i<num_vec; ++i)
403 ASSERT(IDX!=NULL || num_suppvec==0)
404 ASSERT(weights!=NULL || num_suppvec==0)
408 #pragma omp parallel for 409 for (int32_t i=0; i<num_vec; i++)
412 for (int32_t j=0; j<num_suppvec; j++)
413 sub_result += weights[j] * k->
kernel(IDX[j], vec_idx[i]);
425 SG_ERROR(
"CCombinedKernel optimization not initialized\n")
487 int32_t idx,
float64_t * subkernel_contrib)
523 SG_DEBUG(
"entering CCombinedKernel::get_subkernel_weights()\n")
531 SG_DEBUG(
"appending kernel weights\n")
540 for (int32_t j=0; j<num; j++)
549 SG_DEBUG(
"not appending kernel weights\n")
561 SG_DEBUG(
"leaving CCombinedKernel::get_subkernel_weights()\n")
577 for (int32_t i=0; i<num; i++)
650 void CCombinedKernel::init()
665 "Support vector index.");
667 "Support vector weights.");
676 SG_ADD(&subkernel_log_weights,
"subkernel_log_weights",
705 if (!strcmp(param->
m_name,
"combined_kernel_weight"))
735 if (!strcmp(param->
m_name,
"subkernel_log_weights"))
748 float64_t max_coeff = eigen_log_wt.maxCoeff();
749 VectorXd tmp = eigen_log_wt.array() - max_coeff;
756 eigen_res = eigen_res * factor;
784 derivative(g,h)*=coeff;
796 result(g,h)+=derivative(g,h);
813 SG_SERROR(
"CCombinedKernel::obtain_from_generic(): provided kernel is " 814 "not of type CCombinedKernel!\n");
833 int32_t num_combinations = 1;
834 int32_t list_index = 0;
843 SG_SERROR(
"CCombinedKernel::combine_kernels() : Failed to cast list of type " 844 "%s to type CList\n", list->
get_name());
849 SG_SERROR(
"CCombinedKernel::combine_kernels() : Sub-list in position %d " 850 "is empty.\n", list_index);
864 for (
index_t i=0; i<num_combinations; ++i)
882 bool first_kernel =
true;
888 first_kernel =
false;
891 SG_SERROR(
"CCombinedKernel::combine_kernels() : Sub-list in position " 892 "0 contains different types of kernels\n");
922 c_list =
dynamic_cast<CList*
>(list);
932 first_kernel =
false;
935 SG_SERROR(
"CCombinedKernel::combine_kernels() : Sub-list in position " 936 "%d contains different types of kernels\n", list_index);
945 for (
index_t index=0; index<freq; ++index)
virtual void clear_normal()
virtual const char * get_name() const =0
virtual void remove_lhs()
virtual bool init(CFeatures *lhs, CFeatures *rhs)
virtual void remove_rhs()
virtual void compute_by_subkernel(int32_t idx, float64_t *subkernel_contrib)
virtual void compute_batch(int32_t num_vec, int32_t *vec_idx, float64_t *target, int32_t num_suppvec, int32_t *IDX, float64_t *alphas, float64_t factor=1.0)
CSGObject * get_next_element()
SGVector< float64_t > subkernel_log_weights
virtual void compute_by_subkernel(int32_t vector_idx, float64_t *subkernel_contrib)
virtual void set_subkernel_weights(SGVector< float64_t > weights)
int32_t num_rhs
number of feature vectors on right hand side
The Custom Kernel allows for custom user provided kernel matrices.
virtual bool init(CFeatures *lhs, CFeatures *rhs)
SGMatrix< float64_t > get_parameter_gradient(const TParameter *param, index_t index=-1)
virtual void clear_normal()
bool append_kernel(CKernel *k)
CSGObject * get_element(int32_t index) const
void set_is_initialized(bool p_init)
virtual bool delete_optimization()
CDynamicObjectArray * kernel_array
float64_t kernel(int32_t idx_a, int32_t idx_b)
virtual void set_optimization_type(EOptimizationType t)
int32_t get_num_subkernels()
virtual void set_optimization_type(EOptimizationType t)
int32_t get_num_kernels()
virtual void remove_rhs()
takes all necessary steps if the rhs is removed from the kernel
virtual int32_t get_num_vec_lhs()
SGMatrix< float64_t > get_kernel_matrix()
bool get_is_initialized()
virtual void remove_lhs_and_rhs()
bool has_property(EKernelProperty p)
CSGObject * get_first_element()
virtual void remove_lhs_and_rhs()
virtual ~CCombinedKernel()
virtual SGVector< float64_t > get_subkernel_weights()
Class SGObject is the base class of all shogun objects.
CKernel * get_kernel(int32_t idx)
virtual bool init_optimization(int32_t count, int32_t *IDX, float64_t *weights)
void set_combined_kernel_weight(float64_t nw)
virtual float64_t compute(int32_t x, int32_t y)
virtual float64_t compute_optimized(int32_t vector_idx)
float64_t get_combined_kernel_weight()
static CList * combine_kernels(CList *kernel_list)
virtual const float64_t * get_subkernel_weights(int32_t &num_weights)
virtual EFeatureClass get_feature_class() const =0
int32_t get_num_elements()
The Combined kernel is used to combine a number of kernels into a single CombinedKernel object by lin...
Dynamic array class for CSGObject pointers that creates an array that can be used like a list or an a...
int32_t num_lhs
number of feature vectors on left hand side
bool append_subkernel_weights
virtual int32_t get_num_vec_rhs()
virtual void set_subkernel_weights(SGVector< float64_t > weights)
virtual bool init_normalizer()
void add_vector(bool **param, index_t *length, const char *name, const char *description="")
all of classes and functions are contained in the shogun namespace
virtual void compute_batch(int32_t num_vec, int32_t *vec_idx, float64_t *target, int32_t num_suppvec, int32_t *IDX, float64_t *alphas, float64_t factor=1.0)
T sum(const Container< T > &a, bool no_diag=false)
static CCombinedKernel * obtain_from_generic(CKernel *kernel)
virtual EKernelType get_kernel_type()=0
virtual bool init_optimization(int32_t count, int32_t *IDX, float64_t *weights)
virtual bool delete_optimization()
The class Features is the base class of all feature objects.
bool append_element(CSGObject *data)
static float64_t exp(float64_t x)
virtual SGMatrix< float64_t > get_parameter_gradient(const TParameter *param, index_t index=-1)
virtual void add_to_normal(int32_t idx, float64_t weight)
virtual const char * get_name() const
void emulate_compute_batch(CKernel *k, int32_t num_vec, int32_t *vec_idx, float64_t *target, int32_t num_suppvec, int32_t *IDX, float64_t *weights)
static float64_t log(float64_t v)
virtual void remove_lhs()
virtual int32_t get_num_subkernels()
virtual float64_t compute_optimized(int32_t idx)
void push_back(CSGObject *e)
float64_t * subkernel_weights_buffer
bool precompute_subkernels()
virtual bool has_features()
The class CombinedFeatures is used to combine a number of feature objects into a single CombinedFe...
virtual void add_to_normal(int32_t vector_idx, float64_t weight)
virtual EFeatureType get_feature_type() const =0
Class List implements a doubly connected list for low-level-objects.
bool append_feature_obj(CFeatures *obj)
bool enable_subkernel_weight_opt
CCombinedKernel(int32_t size=10, bool append_subkernel_weights=false)
bool append_element(CSGObject *e)
virtual void init_subkernel_weights()
virtual void enable_subkernel_weight_learning()