11 #include <shogun/lib/config.h> 36 void CLibLinearRegression::init_defaults()
45 void CLibLinearRegression::register_parameters()
72 if (num_vec!=num_train_labels)
74 SG_ERROR(
"number of vectors %d does not match " 75 "number of training labels %d\n",
76 num_vec, num_train_labels);
85 liblinear_problem prob;
104 double* Cs = SG_MALLOC(
double, prob.l);
105 for(
int i = 0; i < prob.l; i++)
117 solve_l2r_l1l2_svr(w, &prob);
120 solve_l2r_l1l2_svr(w, &prob);
123 SG_ERROR(
"Error: unknown regression type\n")
160 void CLibLinearRegression::solve_l2r_l1l2_svr(
SGVector<float64_t>& w,
const liblinear_problem *prob)
165 int w_size = prob->n;
170 int *index =
new int[l];
174 double Gmax_new, Gnorm1_new;
175 double Gnorm1_init = 0.0;
176 double *beta =
new double[l];
177 double *QD =
new double[l];
181 double lambda[1], upper_bound[1];
196 for(i=0; i<w_size; i++)
200 QD[i] = prob->x->dot(i, prob->x,i);
201 prob->x->add_to_dense_vec(beta[i], i, w.
vector, w_size);
204 w.
vector[w_size]+=beta[i];
210 while(iter < max_iter)
215 for(i=0; i<active_size; i++)
221 for(s=0; s<active_size; s++)
224 G = -y[i] + lambda[
GETI(i)]*beta[i];
225 H = QD[i] + lambda[
GETI(i)];
227 G += prob->x->dense_dot(i, w.
vector, w_size);
233 double violation = 0;
240 else if(Gp>Gmax_old && Gn<-Gmax_old)
248 else if(beta[i] >= upper_bound[
GETI(i)])
252 else if(Gp < -Gmax_old)
260 else if(beta[i] <= -upper_bound[
GETI(i)])
264 else if(Gn > Gmax_old)
273 violation = fabs(Gp);
275 violation = fabs(Gn);
278 Gnorm1_new += violation;
283 else if(Gn > H*beta[i])
288 if(fabs(d) < 1.0e-12)
291 double beta_old = beta[i];
293 d = beta[i]-beta_old;
297 prob->x->add_to_dense_vec(d, i, w.
vector, w_size);
305 Gnorm1_init = Gnorm1_new;
310 if(Gnorm1_new <= eps*Gnorm1_init)
326 SG_INFO(
"\noptimization finished, #iter = %d\n", iter)
328 SG_INFO(
"\nWARNING: reaching max number of iterations\nUsing -s 11 may be faster\n\n")
333 for(i=0; i<w_size; i++)
338 v += p*fabs(beta[i]) - y[i]*beta[i] + 0.5*lambda[
GETI(i)]*beta[i]*beta[i];
343 SG_INFO(
"Objective value = %lf\n", v)
void set_max_iter(int32_t max_iter)
virtual bool train_machine(CFeatures *data=NULL)
virtual ELabelType get_label_type() const =0
bool get_use_bias() const
virtual void set_w(const SGVector< float64_t > src_w)
The class Labels models labels, i.e. class assignments of objects.
static const float64_t INFTY
Infinity constant.
virtual int32_t get_num_labels() const =0
real-valued labels (e.g. for regression or classifier outputs)
static float64_t log10(float64_t v)
L2 regularized support vector regression with L1 epsilon tube loss.
virtual int32_t get_num_vectors() const =0
void tron(float64_t *w, float64_t max_train_time)
float64_t m_max_train_time
void set_liblinear_regression_type(LIBLINEAR_REGRESSION_TYPE st)
Features that support dot products among other operations.
virtual int32_t get_dim_feature_space() const =0
L2 regularized support vector regression with L2 epsilon tube loss.
L2 regularized support vector regression with L2 epsilon tube loss (dual)
static void clear_cancel()
virtual void set_features(CDotFeatures *feat)
Class LinearMachine is a generic interface for all kinds of linear machines like classifiers.
LIBLINEAR_REGRESSION_TYPE m_liblinear_regression_type
float64_t get_tube_epsilon()
All classes and functions are contained in the shogun namespace.
The class Features is the base class of all feature objects.
float64_t get_epsilon() const
void set_use_bias(bool use_bias)
static void swap(T &a, T &b)
virtual ~CLibLinearRegression()
virtual void set_labels(CLabels *lab)
#define SG_SABS_PROGRESS(...)
void set_epsilon(float64_t epsilon)