28 void CGaussianARDKernel::init()
40 REQUIRE(
rhs,
"Right features (rhs) not set!\n")
42 if (
lhs==
rhs && idx_a==idx_b)
75 bool status=CExponentialARDKernel::init(l,r);
85 REQUIRE(df,
"Features not set\n")
88 for (int32_t i=0; i<num_vec; i++)
89 sq[i]=df->
dot(i,df, i);
110 SG_SERROR(
"Provided kernel is not of type CGaussianARDKernel!\n");
137 return res[0]*scalar_weight;
161 int32_t col_index=index;
163 int32_t total_offset=0;
164 while(col_index>=offset && offset>0)
167 total_offset+=offset;
171 col_index+=row_index;
180 result=res[0]*bvec[col_index];
186 result+=res[0]*avec[col_index];
188 if(row_index==col_index)
205 REQUIRE(param,
"Param not set\n");
211 if (!strcmp(param->
m_name,
"log_weights"))
223 for (
index_t j=0; j<length; j++)
225 if (!strcmp(param->
m_name,
"log_weights") )
244 SG_ERROR(
"Can't compute derivative wrt %s parameter\n", param->
m_name);
253 REQUIRE(param,
"Param not set\n");
255 if (!strcmp(param->
m_name,
"log_weights"))
263 SG_ERROR(
"Can't compute derivative wrt %s parameter\n", param->
m_name);
271 REQUIRE(param,
"Param not set\n");
275 if (!strcmp(param->
m_name,
"log_weights"))
287 derivative(j,k)=
CMath::exp(-dist)*(-dist*2.0);
300 SG_ERROR(
"Can't compute derivative wrt %s parameter\n", param->
m_name);
virtual SGMatrix< float64_t > get_parameter_gradient(const TParameter *param, index_t index=-1)
SGVector< float64_t > m_log_weights
virtual float64_t compute_gradient_helper(SGVector< float64_t > avec, SGVector< float64_t > bvec, float64_t scale, index_t index)
int32_t num_rhs
number of feature vectors on right hand side
virtual float64_t distance(int32_t idx_a, int32_t idx_b)
virtual SGVector< float64_t > get_parameter_gradient_diagonal(const TParameter *param, index_t index=-1)
SGVector< float64_t > m_sq_rhs
static CGaussianARDKernel * obtain_from_generic(CKernel *kernel)
virtual SGMatrix< float64_t > compute_right_product(SGVector< float64_t >vec, float64_t &scalar_weight)
void scale(SGVector< T > &a, SGVector< T > &result, T alpha=1)
virtual float64_t dot(int32_t vec_idx1, CDotFeatures *df, int32_t vec_idx2)=0
virtual int32_t get_num_vectors() const =0
void add(SGVector< T > &a, SGVector< T > &b, SGVector< T > &result, T alpha=1, T beta=1)
float64_t kernel(int32_t idx_a, int32_t idx_b)
virtual float64_t get_parameter_gradient_helper(const TParameter *param, index_t index, int32_t idx_a, int32_t idx_b, SGVector< float64_t > avec, SGVector< float64_t > bvec)
virtual float64_t compute(int32_t idx_a, int32_t idx_b)
Features that support dot products among other operations.
SGMatrix< float64_t > get_weighted_vector(SGVector< float64_t > vec)
Gaussian Kernel with Automatic Relevance Determination (ARD) computed on CDotFeatures.
virtual float64_t compute_helper(SGVector< float64_t > avec, SGVector< float64_t >bvec)
virtual void precompute_squared()
virtual SGVector< float64_t > get_feature_vector(int32_t idx, CFeatures *hs)
int32_t num_lhs
number of feature vectors on left hand side
virtual ~CGaussianARDKernel()
SGVector< float64_t > m_sq_lhs
void matrix_prod(SGMatrix< T > &A, SGVector< T > &b, SGVector< T > &result, bool transpose=false)
virtual void check_weight_gradient_index(index_t index)
CFeatures * rhs
feature vectors to occur on right hand side
All classes and functions are contained in the shogun namespace.
virtual EKernelType get_kernel_type()=0
EARDKernelType m_ARD_type
CFeatures * lhs
feature vectors to occur on left hand side
The class Features is the base class of all feature objects.
static float64_t exp(float64_t x)
static float64_t log(float64_t v)
virtual SGVector< float64_t > precompute_squared_helper(CDotFeatures *df)
Exponential Kernel with Automatic Relevance Determination (ARD) computed on CDotFeatures.