32 #ifndef VARIANCE_H1__H_ 33 #define VARIANCE_H1__H_ 54 #ifndef DOXYGEN_SHOULD_SKIP_THIS 57 VarianceH1() : m_lambda(1E-5), m_free_terms(true)
// NOTE(review): the enclosing signature was lost in extraction — this is the
// body of the accumulator-initialisation routine (presumably `void init_terms()`;
// confirm against the full file).
// Sizes every per-sample kernel-sum accumulator to the current sample counts
// (m_n_x samples from X, m_n_y from Y) and zeroes them, so that add_terms()
// can accumulate a fresh pass over the kernel matrix.
70 m_sum_colwise_x.resize(m_n_x);
71 m_sum_colwise_y.resize(m_n_y);
72 m_sum_rowwise_xy.resize(m_n_x);
73 m_sum_colwise_xy.resize(m_n_y);
// resize() does not reset pre-existing elements, hence the explicit fills.
74 std::fill(m_sum_colwise_x.begin(), m_sum_colwise_x.end(), 0);
75 std::fill(m_sum_colwise_y.begin(), m_sum_colwise_y.end(), 0);
76 std::fill(m_sum_rowwise_xy.begin(), m_sum_rowwise_xy.end(), 0);
77 std::fill(m_sum_colwise_xy.begin(), m_sum_colwise_xy.end(), 0);
// Reuse the existing n_x-by-n_x matrix when its shape already matches
// (avoids a reallocation between repeated calls); otherwise allocate anew.
79 if (m_second_order_terms.rows()==m_n_x && m_second_order_terms.cols()==m_n_x)
80 m_second_order_terms.setZero();
82 m_second_order_terms=Eigen::MatrixXd::Zero(m_n_x, m_n_x);
// NOTE(review): signature lost in extraction — this is the body of the
// accumulator-release routine (presumably `void free_terms()`; confirm).
// Shrinks all accumulators to zero size so their memory can be reclaimed
// once a variance estimate has been computed.
89 m_sum_colwise_x.resize(0);
90 m_sum_colwise_y.resize(0);
91 m_sum_rowwise_xy.resize(0);
92 m_sum_colwise_xy.resize(0);
// Assigning an empty matrix releases the Eigen storage as well.
93 m_second_order_terms=Eigen::MatrixXd::Zero(0, 0);
// NOTE(review): signature lost in extraction — this is the body of
// add_terms(kernel_value, i, j), called only with i != j on the strict
// upper/lower triangle of the joint (m_n_x+m_n_y)-sized kernel matrix
// (see the operator() call sites below). Indices < m_n_x address X samples,
// indices >= m_n_x address Y samples.
// Case 1: both samples from X.
100 if (i<m_n_x && j<m_n_x)
// Factor 2 accounts for the symmetric entry k(j,i), which is never visited
// because only one triangle of the matrix is traversed.
102 m_sum_x+=2*kernel_value;
103 m_sum_sq_x+=2*kernel_value*kernel_value;
104 m_sum_colwise_x[i]+=kernel_value;
105 m_sum_colwise_x[j]+=kernel_value;
106 m_second_order_terms(i, j)+=kernel_value;
107 m_second_order_terms(j, i)+=kernel_value;
// Case 2: both samples from Y; indices shifted by m_n_x into the same
// n_x-by-n_x matrix.
// NOTE(review): writing Y-block indices into an n_x-by-n_x matrix only stays
// in bounds if m_n_y <= m_n_x — looks like this estimator assumes
// m_n_x == m_n_y; confirm against callers.
109 else if (i>=m_n_x && j>=m_n_x)
111 m_sum_y+=2*kernel_value;
112 m_sum_sq_y+=2*kernel_value*kernel_value;
113 m_sum_colwise_y[i-m_n_x]+=kernel_value;
114 m_sum_colwise_y[j-m_n_x]+=kernel_value;
115 m_second_order_terms(i-m_n_x, j-m_n_x)+=kernel_value;
116 m_second_order_terms(j-m_n_x, i-m_n_x)+=kernel_value;
// Case 3: cross term, one sample from X (row) and one from Y (column).
// No factor 2 here: the XY block is rectangular, each pair visited once.
118 else if (i<m_n_x && j>=m_n_x)
120 m_sum_xy+=kernel_value;
121 m_sum_sq_xy+=kernel_value*kernel_value;
// Cross terms enter the second-order matrix with a negative sign, matching
// the -k(x,y) contribution of the MMD statistic.
124 m_second_order_terms(i, j-m_n_x)-=kernel_value;
125 m_second_order_terms(j-m_n_x, i)-=kernel_value;
127 m_sum_rowwise_xy[i]+=kernel_value;
128 m_sum_colwise_xy[j-m_n_x]+=kernel_value;
// NOTE(review): signature lost in extraction — this is the body of the
// variance-estimate computation (presumably `float64_t compute_variance_estimate()`).
// The map_* names are not defined in the visible lines; presumably they are
// Eigen::Map views over the corresponding m_sum_* std::vectors, created in
// the dropped lines just above — confirm against the full file.
// t_0..t_3: terms built from the within-X sums and the X-vs-Y cross sums.
139 auto t_0=(map_sum_colwise_x.dot(map_sum_colwise_x)-m_sum_sq_x)/m_n_x/(m_n_x-1)/(m_n_x-2);
// Squared mean of the within-X kernel sum.
140 auto t_1=
CMath::sq(m_sum_x/m_n_x/(m_n_x-1));
142 auto t_2=map_sum_colwise_x.dot(map_sum_rowwise_xy)*2/m_n_x/(m_n_x-1)/m_n_y;
143 auto t_3=m_sum_x*m_sum_xy*2/m_n_x/m_n_x/(m_n_x-1)/m_n_y;
// t_4..t_7: the symmetric terms for Y.
145 auto t_4=(map_sum_colwise_y.dot(map_sum_colwise_y)-m_sum_sq_y)/m_n_y/(m_n_y-1)/(m_n_y-2);
146 auto t_5=
CMath::sq(m_sum_y/m_n_y/(m_n_y-1));
148 auto t_6=map_sum_colwise_y.dot(map_sum_colwise_xy)*2/m_n_y/(m_n_y-1)/m_n_x;
149 auto t_7=m_sum_y*m_sum_xy*2/m_n_y/m_n_y/(m_n_y-1)/m_n_x;
// t_8..t_10: pure cross-term contributions.
151 auto t_8=(map_sum_rowwise_xy.dot(map_sum_rowwise_xy)-m_sum_sq_xy)/m_n_y/(m_n_y-1)/m_n_x;
152 auto t_9=2*
CMath::sq(m_sum_xy/m_n_x/m_n_y);
153 auto t_10=(map_sum_colwise_xy.dot(map_sum_colwise_xy)-m_sum_sq_xy)/m_n_x/(m_n_x-1)/m_n_y;
// First-order part, scaled by 4(n-2)/(n(n-1)) with n = m_n_x.
155 auto var_first=(t_0-t_1)-t_2+t_3+(t_4-t_5)-t_6+t_7+(t_8-t_9+t_10);
156 var_first*=4.0*(m_n_x-2)/m_n_x/(m_n_x-1);
// Second-order part: 2/(n_x n_y (n_x-1)(n_y-1)) * sum of squared
// second-order terms accumulated in add_terms().
158 auto var_second=2.0/m_n_x/m_n_y/(m_n_x-1)/(m_n_y-1)*m_second_order_terms.array().square().sum();
160 auto variance_estimate=var_first+var_second;
// The unbiased first-order estimate can go negative on finite samples;
// fall back to the (non-negative) second-order part in that case.
161 if (variance_estimate<0)
162 variance_estimate=var_second;
164 return variance_estimate;
// Computes the H1 variance estimate for a single kernel. Kernel is any
// callable returning k(i, j) for indices into the joint sample
// [X (m_n_x samples) | Y (m_n_y samples)].
// NOTE(review): extraction dropped several interior lines (original 169-181);
// presumably they contained braces plus init_terms()/free_terms() calls —
// confirm against the full file before editing.
167 template <
class Kernel>
168 float64_t operator()(
const Kernel& kernel)
// Both sample sizes must be set before use.
170 ASSERT(m_n_x>0 && m_n_y>0);
172 const index_t size=m_n_x+m_n_y;
// Strict upper triangle only (i < j, diagonal excluded); add_terms()
// internally doubles the symmetric within-block contributions.
174 for (
auto j=0; j<size; ++j)
176 for (
auto i=0; i<j; ++i)
177 add_terms(kernel(i, j), i, j);
179 auto variance_estimate=compute_variance_estimate();
181 return variance_estimate;
// Computes the H1 variance estimate for every kernel managed by kernel_mgr,
// returning one estimate per kernel.
// NOTE(review): the tail of this method (original lines 205+), presumably
// free_terms() and `return result;`, was dropped in extraction — confirm
// against the full file.
184 SGVector<float64_t> operator()(
const KernelManager& kernel_mgr)
186 ASSERT(m_n_x>0 && m_n_y>0);
188 ASSERT(kernel_mgr.num_kernels()>0);
190 const index_t size=m_n_x+m_n_y;
191 SGVector<float64_t> result(kernel_mgr.num_kernels());
// One float32 triangular buffer (size*(size+1)/2 entries) reused for every
// kernel's precomputed values.
192 SelfAdjointPrecomputedKernel kernel_functor(SGVector<float32_t>(size*(size+1)/2));
193 for (
auto k=0; k<kernel_mgr.num_kernels(); ++k)
195 auto kernel=kernel_mgr.kernel_at(k);
197 kernel_functor.precompute(kernel);
// Strict upper triangle (j > i); mirrors the single-kernel operator().
199 for (
auto i=0; i<size; ++i)
201 for (
auto j=i+1; j<size; ++j)
202 add_terms(kernel_functor(i, j), i, j);
204 result[k]=compute_variance_estimate();
// Computes a per-kernel test-power criterion: for each kernel both the MMD
// statistic and its H1 variance estimate are computed from the same
// precomputed kernel values.
// NOTE(review): the tail of this method (original lines 237+) was dropped in
// extraction; presumably it combines mmd_est and var_est into result[k]
// (e.g. a ratio regularised by m_lambda) and returns result — confirm
// against the full file.
211 SGVector<float64_t> test_power(
const KernelManager& kernel_mgr)
213 ASSERT(m_n_x>0 && m_n_y>0);
215 ASSERT(kernel_mgr.num_kernels()>0);
// Reuse the ComputeMMD functor, configured with the same sample split.
216 ComputeMMD compute_mmd_job;
217 compute_mmd_job.m_n_x=m_n_x;
218 compute_mmd_job.m_n_y=m_n_y;
221 const index_t size=m_n_x+m_n_y;
222 SGVector<float64_t> result(kernel_mgr.num_kernels());
// Shared float32 triangular buffer for the precomputed kernel values.
223 SelfAdjointPrecomputedKernel kernel_functor(SGVector<float32_t>(size*(size+1)/2));
224 for (
auto k=0; k<kernel_mgr.num_kernels(); ++k)
226 auto kernel=kernel_mgr.kernel_at(k);
228 kernel_functor.precompute(kernel);
// Strict upper triangle, as in the other operator() overloads.
230 for (
auto i=0; i<size; ++i)
232 for (
auto j=i+1; j<size; ++j)
233 add_terms(kernel_functor(i, j), i, j);
235 auto var_est=compute_variance_estimate();
236 auto mmd_est=compute_mmd_job(kernel_functor);
// Per-sample accumulators filled by add_terms() and consumed by
// compute_variance_estimate().
// NOTE(review): unqualified `vector` — presumably std::vector via a
// using-declaration in lines dropped by extraction; confirm.
// Column sums of the within-X kernel block (one entry per X sample).
255 vector<float64_t> m_sum_colwise_x;
// Column sums of the within-Y kernel block (one entry per Y sample).
256 vector<float64_t> m_sum_colwise_y;
// Row sums of the cross X-vs-Y block (indexed by X sample).
257 vector<float64_t> m_sum_rowwise_xy;
// Column sums of the cross X-vs-Y block (indexed by Y sample).
258 vector<float64_t> m_sum_colwise_xy;
263 #endif // DOXYGEN_SHOULD_SKIP_THIS 270 #endif // VARIANCE_H1__H_
All classes and functions are contained in the shogun namespace.
static float32_t sqrt(float32_t x)