#include <ilcplex/cplex.h>

SG_INFO("trying to initialize CPLEX\n")

env = CPXopenCPLEX(&status);

SG_WARNING("Could not open CPLEX environment.\n")
CPXgeterrorstring(env, status, errmsg);

status = CPXsetintparam(env, CPX_PARAM_LPMETHOD, CPX_ALG_DUAL);

SG_ERROR("Failure to select dual lp optimization, error %d.\n", status)

status = CPXsetintparam(env, CPX_PARAM_DATACHECK, CPX_ON);

SG_ERROR("Failure to turn on data checking, error %d.\n", status)
int* ind=SG_MALLOC(int, num_kernels+1);

double const_term = 1-CMath::qsq(beta, num_kernels, mkl_norm);

ASSERT(CMath::fequal(const_term, 0.0))

for (int32_t i=0; i<num_kernels; i++)
{
    lin_term[i]=grad_beta[i] - 2*beta[i]*hess_beta[i];
    const_term+=grad_beta[i]*beta[i] - CMath::sq(beta[i])*hess_beta[i];
}

ind[num_kernels]=2*num_kernels;
hess_beta[num_kernels]=0;
lin_term[num_kernels]=0;

status = CPXaddqconstr(env, lp_cplex, num_kernels+1, num_kernels+1, const_term,
        'L', ind, lin_term, ind, ind, hess_beta, NULL);
SG_WARNING("CPXfreeprob failed, error code %d.\n", status)

int32_t status = CPXcloseCPLEX(&env);

SG_WARNING("Could not close CPLEX environment.\n")
CPXgeterrorstring(env, status, errmsg);
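// check_glp_status(): GLPK problems are minimized with terminal output
// suppressed; after solving, glp_get_status() is inspected for infeasibility.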
glp_set_obj_dir(lp_glpk, GLP_MIN);

glp_term_out(GLP_OFF);

int status = glp_get_status(lp_glpk);

if (status==GLP_INFEAS)
    SG_SPRINT("solution is infeasible!\n")
else if (status==GLP_NOFEAS)
    SG_SPRINT("problem has no feasible solution!\n")
SG_DEBUG("creating MKL object %p\n", this)

SG_DEBUG("deleting MKL object %p\n", this)

void CMKL::register_params()
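// CMKL::clone(): the GLPK solver state (parameter struct and problem object)
// lives behind the "self" implementation pointer and cannot go through the
// generic parameter cloning, so it is deep-copied by hand; cloning with the
// CPLEX backend is not supported.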
if (self->lp_glpk_parm != nullptr)
{
    cloned->self->lp_glpk_parm = SG_MALLOC(glp_smcp, 1);
    *cloned->self->lp_glpk_parm = *self->lp_glpk_parm;
}

if (self->lp_glpk != nullptr)
{
    cloned->self->lp_glpk = glp_create_prob();
    glp_copy_prob(cloned->self->lp_glpk, self->lp_glpk, GLP_ON);
}

SG_ERROR("Cloning MKL using the CPLEX solver is currently not supported.\n");
SG_ERROR("%s::train_machine(): Number of training vectors (%d) does"
        " not match number of labels (%d)\n", get_name(),

SG_ERROR("No constraint generator (SVM) set\n")

int32_t num_weights = -1;

SG_INFO("num_kernels = %d\n", num_kernels)

ASSERT(num_weights==num_kernels)

self->cleanup_cplex();

SG_ERROR("Interleaved MKL optimization is currently "
        "only supported with SVMlight\n");

#ifdef USE_REFERENCE_COUNTING
int32_t refs=this->ref();

#ifdef USE_REFERENCE_COUNTING

SG_SWARNING("MKL Algorithm terminates PREMATURELY due to current training time exceeding get_max_train_time()=%f. It may not have converged yet!\n",
        get_max_train_time())

for (int32_t i=0; i<nsv; i++)

SG_ERROR("Norm must be >= 1, e.g., 1-norm is the standard MKL; norms>1 nonsparse MKL\n")

if (lambda>1 || lambda<0)

else if (lambda==1.0)

SG_SWARNING("MKL Algorithm terminates PREMATURELY due to current training time exceeding get_max_train_time()=%f. It may not have converged yet!\n",
        get_max_train_time())

ASSERT(nweights==num_kernels)

#if defined(USE_CPLEX) || defined(USE_GLPK)
int32_t inner_iters=0;
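// The per-iteration MKL objective is the beta-weighted sum of the per-kernel
// quadratic terms sumw[i], evaluated at the old weights (its -suma
// initialization appears to be elided from this excerpt):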
for (int32_t i=0; i<num_kernels; i++)
    mkl_objective+=old_beta[i]*sumw[i];

SG_ERROR("Solver type not supported (not compiled in?)\n")
int32_t nofKernelsGood;

nofKernelsGood = num_kernels;

for (p=0; p<num_kernels; ++p)
    if (sumw[p] >= 0.0 && old_beta[p] >= 0.0)
        beta[p] = CMath::sqrt(sumw[p]*old_beta[p]*old_beta[p]);

for (p=0; p<num_kernels; ++p)

SG_PRINT("MKL-direct: p = %.3f\n", 1.0)
SG_PRINT("MKL-direct: nofKernelsGood = %d\n", nofKernelsGood)
SG_PRINT("MKL-direct: Z = %e\n", Z)
SG_PRINT("MKL-direct: eps = %e\n", epsRegul)

for (p=0; p<num_kernels; ++p)

SG_PRINT("MKL-direct: preR = %e\n", preR)
SG_PRINT("MKL-direct: preR/p = %e\n", preR)
SG_PRINT("MKL-direct: R = %e\n", R)
SG_ERROR("Assertion R >= 0 failed!\n")

for (p=0; p<num_kernels; ++p)

for (p=0; p<num_kernels; ++p)

for (p=0; p<num_kernels; ++p)

for (p=0; p<num_kernels; ++p)
    obj += sumw[p] * beta[p];
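// compute_optimal_betas_elasticnet(): the active set I collects candidate
// kernels, and the do-while refines del until the decrease test
// ff <= ff_old + 1e-4*gg_old*(del-del_old) holds (an Armijo-style
// sufficient-decrease condition on the elastic-net dual).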
std::list<int32_t> I;

for (int32_t i=0; i<len; i++)

for (std::list<int32_t>::iterator it=I.begin(); it!=I.end(); it++)

for (int32_t i=0; i<n; i++)

for (int32_t j=0; j<n; j++)

} while (ff>ff_old+1e-4*gg_old*(del-del_old));

SG_ERROR("cannot compute objective, labels or kernel not set\n")

for (p=0; p<num_kernels; ++p)

for (p=0; p<num_kernels; ++p)

for (p=0; p<num_kernels; ++p)
    obj += sumw[p] * beta[p];
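// A second direct-style update (apparently the general q-norm variant):
// beta[p] is seeded from sumw[p]*old_beta[p]^2 scaled by 1/mkl_norm, and
// preR accumulates the squared change against old_beta for the debug output.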
int32_t nofKernelsGood;

nofKernelsGood = num_kernels;
for (p=0; p<num_kernels; ++p)
    if (sumw[p] >= 0.0 && old_beta[p] >= 0.0)
        beta[p] = sumw[p] * old_beta[p]*old_beta[p] / mkl_norm;

for (p=0; p<num_kernels; ++p)

for (p=0; p<num_kernels; ++p)

for (p=0; p<num_kernels; ++p)
    preR += CMath::sq(old_beta[p] - beta[p]);

SG_PRINT("MKL-direct: nofKernelsGood = %d\n", nofKernelsGood)
SG_PRINT("MKL-direct: Z = %e\n", Z)
SG_PRINT("MKL-direct: eps = %e\n", epsRegul)
for (p=0; p<num_kernels; ++p)
    SG_PRINT("MKL-direct: t[%3d] = %e ( diff = %e = %e - %e )\n", p, t, old_beta[p]-beta[p], old_beta[p], beta[p])
SG_PRINT("MKL-direct: preR = %e\n", preR)
SG_PRINT("MKL-direct: R = %e\n", R)
SG_ERROR("Assertion R >= 0 failed!\n")

for (p=0; p<num_kernels; ++p)

for (p=0; p<num_kernels; ++p)

for (p=0; p<num_kernels; ++p)
    obj += sumw[p] * beta[p];
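// compute_optimal_betas_newton(): Newton descent on the kernel weights for
// q > 1. The eps* constants guard degenerate values (tiny weights,
// unnormalized input, negative sumw, vanishing gamma); nofNewtonSteps bounds
// the outer iterations, hessRidge stabilizes the curvature term, and
// inLogSpace switches to a multiplicative update of beta.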
float64_t CMKL::compute_optimal_betas_newton(float64_t* beta,
        const float64_t* old_beta, int32_t num_kernels,
        const float64_t* sumw, float64_t suma, float64_t mkl_objective)

SG_ERROR("MKL via NEWTON works only for norms>1\n")

const double epsBeta = 1e-32;
const double epsGamma = 1e-12;
const double epsWsq = 1e-12;
const double epsNewt = 0.0001;
const double epsStep = 1e-9;
const int nofNewtonSteps = 3;
const double hessRidge = 1e-6;
const int inLogSpace = 0;
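// Initialization: start from old_beta, clamping entries below epsBeta, and
// force re-normalization when the incoming weights are off by more than
// epsGamma; sumw entries in [-epsWsq, 0) are tolerated but warned about.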
for (p=0; p<num_kernels; ++p)
{
    beta[p] = old_beta[p];
    if (!(beta[p] >= epsBeta))
        beta[p] = epsBeta;
    ASSERT(0.0 <= beta[p] && beta[p] <= 1.0)
}

if (!(fabs(Z-1.0) <= epsGamma))
{
    SG_WARNING("old_beta not normalized (diff=%e); forcing normalization. ", Z-1.0)
    for (p=0; p<num_kernels; ++p)
    {
        ASSERT(0.0 <= beta[p] && beta[p] <= 1.0)
    }
}

for (p=0; p<num_kernels; ++p)
{
    if (!(sumw[p] >= 0))
    {
        if (!(sumw[p] >= -epsWsq))
            SG_WARNING("sumw[%d] = %e; treated as 0. ", p, sumw[p])
    }
}

if (!(gamma > epsGamma))
{
    SG_WARNING("bad gamma: %e; set to %e. ", gamma, epsGamma)
    gamma = epsGamma;
}
ASSERT(gamma >= epsGamma)

for (p=0; p<num_kernels; ++p)
    obj += beta[p] * sumw[p];

if (!(obj >= 0.0))
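// Newton direction: t1 is the (nonnegatively clipped) gradient term built
// from halfw2p, t2 the curvature; the log-space branch adds hessRidge to
// keep the denominator away from zero. ASSERT(x == x) is a NaN check.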
for (i = 0; i < nofNewtonSteps; ++i)

for (p=0; p<num_kernels; ++p)
{
    ASSERT(0.0 <= beta[p] && beta[p] <= 1.0)

    const float halfw2p = (sumw[p] >= 0.0) ? (sumw[p]*old_beta[p]*old_beta[p]) : 0.0;

    const float64_t t1 = (t0 < 0) ? 0.0 : t0;

    if (inLogSpace)
        newtDir[p] = t1 / (t1 + t2*beta[p] + hessRidge);
    else
        newtDir[p] = (t1 == 0.0) ? 0.0 : (t1 / t2);

    ASSERT(newtDir[p] == newtDir[p])
}
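// Backtracking line search on the Newton step: move along newtDir
// (multiplicatively in log space, additively otherwise), clamp the trial
// weights to [epsBeta, 1], and accept only on sufficient decrease,
// newtObj < obj - epsNewt*stepSize*obj; otherwise the step is shrunk until
// it drops below epsStep.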
while (stepSize >= epsStep)
{
    for (p=0; p<num_kernels; ++p)
    {
        if (inLogSpace)
            newtBeta[p] = beta[p] * CMath::exp(+stepSize * newtDir[p]);
        else
            newtBeta[p] = beta[p] + stepSize * newtDir[p];

        if (!(newtBeta[p] >= epsBeta))
            newtBeta[p] = epsBeta;
    }

    for (p=0; p<num_kernels; ++p)
    {
        if (newtBeta[p] > 1.0)
            newtBeta[p] = 1.0;
        ASSERT(0.0 <= newtBeta[p] && newtBeta[p] <= 1.0)
    }

    for (p=0; p<num_kernels; ++p)
        newtObj += sumw[p] * old_beta[p]*old_beta[p] / newtBeta[p];

    if (newtObj < obj - epsNewt*stepSize*obj)
    {
        for (p=0; p<num_kernels; ++p)
            beta[p] = newtBeta[p];
    }
}

if (stepSize < epsStep)
    break;

for (p=0; p<num_kernels; ++p)
    obj += beta[p] * sumw[p];
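// compute_optimal_betas_via_cplex(): the LP/QCP uses 2*num_kernels+1
// columns -- num_kernels beta variables in [0,1], num_kernels auxiliary
// (smoothness) variables, and one free column at index 2*num_kernels whose
// objective coefficient of 1 makes it the epigraph variable for the MKL
// objective.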
int32_t NUMCOLS = 2*num_kernels + 1;
double* x=SG_MALLOC(double, NUMCOLS);

double obj[NUMCOLS];

for (int32_t i=0; i<2*num_kernels; i++)

for (int32_t i=num_kernels; i<2*num_kernels; i++)

obj[2*num_kernels]=1;
lb[2*num_kernels]=-CPX_INFBOUND;
ub[2*num_kernels]=CPX_INFBOUND;

int status = CPXnewcols(self->env, self->lp_cplex, NUMCOLS, obj, lb, ub, NULL, NULL);

CPXgeterrorstring(self->env, status, errmsg);

SG_INFO("adding the first row\n")

int initial_rmatbeg[1];
int initial_rmatind[num_kernels+1];
double initial_rmatval[num_kernels+1];
double initial_rhs[1];
char initial_sense[1];

// equality row over the beta columns (the epigraph column enters with 0)
initial_rmatbeg[0] = 0;
initial_sense[0]='E';

for (int32_t i=0; i<num_kernels; i++)
{
    initial_rmatind[i]=i;
    initial_rmatval[i]=1;
}
initial_rmatind[num_kernels]=2*num_kernels;
initial_rmatval[num_kernels]=0;

status = CPXaddrows(self->env, self->lp_cplex, 0, 1, num_kernels+1,
        initial_rhs, initial_sense, initial_rmatbeg,
        initial_rmatind, initial_rmatval, NULL, NULL);

initial_rmatbeg[0] = 0;
initial_sense[0]='L';

initial_rmatind[0]=2*num_kernels;
initial_rmatval[0]=0;

status = CPXaddrows(self->env, self->lp_cplex, 0, 1, 1,
        initial_rhs, initial_sense, initial_rmatbeg,
        initial_rmatind, initial_rmatval, NULL, NULL);

// quadratic constraint sum_i beta_i^2 <= 1.0 for the q-norm case
for (int32_t i=0; i<num_kernels; i++)
{
    initial_rmatind[i]=i;
    initial_rmatval[i]=1;
}
initial_rmatind[num_kernels]=2*num_kernels;
initial_rmatval[num_kernels]=0;

status = CPXaddqconstr(self->env, self->lp_cplex, 0, num_kernels+1, 1.0,
        'L', NULL, NULL,
        initial_rmatind, initial_rmatind, initial_rmatval, NULL);

SG_ERROR("Failed to add the first row.\n")
for (int32_t q=0; q<num_kernels-1; q++)
{
    rmatind[2]=num_kernels+q;

    status = CPXaddrows(self->env, self->lp_cplex, 0, 1, 3,
            rhs, sense, rmatbeg,
            rmatind, rmatval, NULL, NULL);

    SG_ERROR("Failed to add a smoothness row (1).\n")

    rmatind[2]=num_kernels+q;

    status = CPXaddrows(self->env, self->lp_cplex, 0, 1, 3,
            rhs, sense, rmatbeg,
            rmatind, rmatval, NULL, NULL);

    SG_ERROR("Failed to add a smoothness row (2).\n")
int rmatind[num_kernels+1];
double rmatval[num_kernels+1];

for (int32_t i=0; i<num_kernels; i++)
{
    // two alternative coefficient choices; the selecting branch is elided
    // in this excerpt
    rmatval[i]=-(sumw[i]-suma);

    rmatval[i]=-sumw[i];
}
rmatind[num_kernels]=2*num_kernels;
rmatval[num_kernels]=-1;

int32_t status = CPXaddrows(self->env, self->lp_cplex, 0, 1, num_kernels+1,
        rhs, sense, rmatbeg,
        rmatind, rmatval, NULL, NULL);

SG_ERROR("Failed to add the new row.\n")
status = CPXlpopt(self->env, self->lp_cplex);

status = CPXbaropt(self->env, self->lp_cplex);

for (int32_t i=0; i<num_kernels; i++)
    beta[i]=old_beta[i];
for (int32_t i=num_kernels; i<2*num_kernels+1; i++)

CMath::scale_vector(1/CMath::qnorm(beta, num_kernels, mkl_norm), beta, num_kernels);

set_qnorm_constraints(beta, num_kernels);

status = CPXbaropt(self->env, self->lp_cplex);

SG_ERROR("Failed to optimize Problem.\n")

status=CPXsolution(self->env, self->lp_cplex, &solstat, &objval,
        (double*) beta, NULL, NULL, NULL);

CMath::display_vector(beta, num_kernels, "beta");
SG_ERROR("Failed to obtain solution.\n")

CMath::scale_vector(1/CMath::qnorm(beta, num_kernels, mkl_norm), beta, num_kernels);
SG_ERROR("Failed to optimize Problem.\n")

int32_t cur_numrows=(int32_t) CPXgetnumrows(self->env, self->lp_cplex);
int32_t cur_numcols=(int32_t) CPXgetnumcols(self->env, self->lp_cplex);
int32_t num_rows=cur_numrows;
ASSERT(cur_numcols<=2*num_kernels+1)

status=CPXsolution(self->env, self->lp_cplex, &solstat, &objval,
        (double*) x, (double*) pi, (double*) slack, NULL);

status=CPXsolution(self->env, self->lp_cplex, &solstat, &objval,
        (double*) x, NULL, (double*) slack, NULL);

int32_t solution_ok = (!status);

SG_ERROR("Failed to obtain solution.\n")
int32_t num_active_rows=0;

int32_t max_idx = -1;
int32_t start_row = 1;

start_row+=2*(num_kernels-1);

for (int32_t i = start_row; i < cur_numrows; i++)
{
    if (slack[i]>max_slack)
    {
        max_slack=slack[i];
        max_idx=i;
    }

    if (slack[i]>max_slack)
    {
        max_slack=slack[i];
        max_idx=i;
    }
}

if ((num_rows-start_row>CMath::max(100,2*num_active_rows)) && (max_idx!=-1))
{
    status = CPXdelrows(self->env, self->lp_cplex, max_idx, max_idx);

    SG_ERROR("Failed to remove an old row.\n")
}

rho = -x[2*num_kernels];

for (int32_t i=0; i<num_kernels; i++)

SG_ERROR("Cplex not enabled at compile time\n")
SG_ERROR("MKL via GLPK works only for norm=1\n")

int32_t NUMCOLS = 2*num_kernels + 1;

glp_add_cols(self->lp_glpk, NUMCOLS);
for (int i=1; i<=2*num_kernels; i++)
{
    glp_set_obj_coef(self->lp_glpk, i, 0);
    glp_set_col_bnds(self->lp_glpk, i, GLP_DB, 0, 1);
}
for (int i=num_kernels+1; i<=2*num_kernels; i++)
    glp_set_obj_coef(self->lp_glpk, i, C_mkl);

glp_set_obj_coef(self->lp_glpk, NUMCOLS, 1);
glp_set_col_bnds(self->lp_glpk, NUMCOLS, GLP_FR, 0, 0);

int row_index = glp_add_rows(self->lp_glpk, 1);
int* ind = SG_MALLOC(int, num_kernels+2);

for (int i=1; i<=num_kernels; i++)

ind[num_kernels+1] = NUMCOLS;
val[num_kernels+1] = 0;
glp_set_mat_row(self->lp_glpk, row_index, num_kernels, ind, val);
glp_set_row_bnds(self->lp_glpk, row_index, GLP_FX, 1, 1);
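// GLPK smoothness constraints: two GLP_UP rows per consecutive kernel pair,
// sharing one 3-entry sparse pattern over the beta columns and slack column
// num_kernels+q (the mat_ind/mat_val entries other than index 3 are elided
// here).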
for (int32_t q=1; q<num_kernels; q++)
{
    int mat_row_index = glp_add_rows(self->lp_glpk, 2);

    mat_ind[3] = num_kernels+q;

    glp_set_mat_row(self->lp_glpk, mat_row_index, 3, mat_ind, mat_val);
    glp_set_row_bnds(self->lp_glpk, mat_row_index, GLP_UP, 0, 0);

    glp_set_mat_row(self->lp_glpk, mat_row_index+1, 3, mat_ind, mat_val);
    glp_set_row_bnds(self->lp_glpk, mat_row_index+1, GLP_UP, 0, 0);
}
int* ind=SG_MALLOC(int, num_kernels+2);

int row_index = glp_add_rows(self->lp_glpk, 1);
for (int32_t i=1; i<=num_kernels; i++)
    val[i] = -(sumw[i-1]-suma);

ind[num_kernels+1] = 2*num_kernels+1;
val[num_kernels+1] = -1;
glp_set_mat_row(self->lp_glpk, row_index, num_kernels+1, ind, val);
glp_set_row_bnds(self->lp_glpk, row_index, GLP_UP, 0, 0);

glp_simplex(self->lp_glpk, self->lp_glpk_parm);
bool res = self->check_glp_status();

SG_ERROR("Failed to optimize Problem.\n")
int32_t cur_numrows = glp_get_num_rows(self->lp_glpk);
int32_t cur_numcols = glp_get_num_cols(self->lp_glpk);
int32_t num_rows=cur_numrows;
ASSERT(cur_numcols<=2*num_kernels+1)

for (int i=0; i<cur_numrows; i++)
{
    row_primal[i] = glp_get_row_prim(self->lp_glpk, i+1);
    row_dual[i] = glp_get_row_dual(self->lp_glpk, i+1);
}
for (int i=0; i<cur_numcols; i++)
    col_primal[i] = glp_get_col_prim(self->lp_glpk, i+1);

obj = -col_primal[2*num_kernels];

for (int i=0; i<num_kernels; i++)
    beta[i] = col_primal[i];

int32_t num_active_rows=0;

int32_t max_idx = -1;
int32_t start_row = 1;

start_row += 2*(num_kernels-1);

for (int32_t i=start_row; i<cur_numrows; i++)
{
    if (row_primal[i]<max_slack)
    {
        max_slack = row_primal[i];
        max_idx=i;
    }
}

if ((num_rows-start_row>CMath::max(100, 2*num_active_rows)) && max_idx!=-1)
{
    del_rows[1] = max_idx+1;
    glp_del_rows(self->lp_glpk, 1, del_rows);
}

SG_FREE(row_primal);
SG_FREE(col_primal);

SG_ERROR("Glpk not enabled at compile time\n")
ASSERT(nweights==num_kernels)

for (int32_t i=0; i<num_kernels; i++)

for (int32_t n=0; n<num_kernels; n++)

for (int32_t i=0; i<nsv; i++)

for (int32_t j=0; j<nsv; j++)

for (int32_t i=0; i<n; i++)

for (int32_t j=0; j<n; j++)

mkl_obj=-0.5*mkl_obj;

SG_ERROR("cannot compute objective, labels or kernel not set\n")