SHOGUN  6.0.0
NLOPTMinimizer.cpp
Go to the documentation of this file.
1  /*
2  * Copyright (c) The Shogun Machine Learning Toolbox
3  * Written (w) 2015 Wu Lin
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice, this
10  * list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18  * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
19  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  *
26  * The views and conclusions contained in the software and documentation are those
27  * of the authors and should not be interpreted as representing official policies,
28  * either expressed or implied, of the Shogun Development Team.
29  *
30  */
31 
34 #include <shogun/base/Parameter.h>
35 #include <algorithm>
36 
37 using namespace shogun;
38 #ifdef USE_GPL_SHOGUN
/** Default constructor: delegates all member setup to init().
 * NOTE(review): this rendering appears to have elided the base-class
 * initializer list line between the signature and the body — confirm
 * against the repository source. */
CNLOPTMinimizer::CNLOPTMinimizer()
{
	init();
}
44 
/** Destructor. No resources are owned directly by this class
 * (the nlopt_opt handle is created and destroyed inside minimize()),
 * so there is nothing to release here. */
CNLOPTMinimizer::~CNLOPTMinimizer()
{
}
48 
/** Constructor taking the cost function to be minimized.
 * @param fun cost function (presumably forwarded to the base class —
 * the base initializer list line seems elided in this rendering;
 * TODO confirm against the repository source, otherwise fun is unused)
 */
CNLOPTMinimizer::CNLOPTMinimizer(FirstOrderCostFunction *fun)
{
	init();
}
54 
/** Shared initializer for all constructors: resets the target variable,
 * applies the default NLopt settings, and registers the tunable members
 * with Shogun's parameter framework so they are serialized.
 * The SG_ADD name strings are part of the serialization format —
 * do not change them. */
void CNLOPTMinimizer::init()
{
#ifdef HAVE_NLOPT
	// empty until minimize() pulls the variable reference from the cost function
	m_target_variable=SGVector<float64_t>();
	// applies the default algorithm/iteration/tolerance settings
	set_nlopt_parameters();
	SG_ADD(&m_max_iterations, "CNLOPTMinimizer__m_max_iterations",
		"max_iterations in CNLOPTMinimizer", MS_NOT_AVAILABLE);
	SG_ADD(&m_variable_tolerance, "CNLOPTMinimizer__m_variable_tolerance",
		"variable_tolerance in CNLOPTMinimizer", MS_NOT_AVAILABLE);
	SG_ADD(&m_function_tolerance, "CNLOPTMinimizer__m_function_tolerance",
		"function_tolerance in CNLOPTMinimizer", MS_NOT_AVAILABLE);
	SG_ADD(&m_nlopt_algorithm_id, "CNLOPTMinimizer__m_nlopt_algorithm_id",
		"nlopt_algorithm_id in CNLOPTMinimizer", MS_NOT_AVAILABLE);
#endif
}
70 
71 float64_t CNLOPTMinimizer::minimize()
72 {
73 #ifdef HAVE_NLOPT
74  init_minimization();
75 
76  nlopt_opt opt=nlopt_create(get_nlopt_algorithm(m_nlopt_algorithm_id),
77  m_target_variable.vlen);
78 
79  //add bound constraints
80  FirstOrderBoundConstraintsCostFunction* bound_constraints_fun
81  =dynamic_cast<FirstOrderBoundConstraintsCostFunction *>(m_fun);
82  if(bound_constraints_fun)
83  {
84  SGVector<float64_t> bound=bound_constraints_fun->get_lower_bound();
85  if(bound.vlen==1)
86  {
87  nlopt_set_lower_bounds1(opt, bound[0]);
88  }
89  else if (bound.vlen>1)
90  {
91  REQUIRE(bound.vlen==m_target_variable.vlen,
92  "The length of target variable (%d) and the length of lower bound (%d) do not match\n",
93  m_target_variable.vlen, bound.vlen);
94  nlopt_set_lower_bounds(opt, bound.vector);
95  }
96 
97  bound=bound_constraints_fun->get_upper_bound();
98  if(bound.vlen==1)
99  {
100  nlopt_set_upper_bounds1(opt, bound[0]);
101  }
102  else if (bound.vlen>1)
103  {
104  REQUIRE(bound.vlen==m_target_variable.vlen,
105  "The length of target variable (%d) and the length of upper bound (%d) do not match\n",
106  m_target_variable.vlen, bound.vlen);
107  nlopt_set_upper_bounds(opt, bound.vector);
108  }
109 
110  }
111  // set maximum number of evaluations
112  nlopt_set_maxeval(opt, m_max_iterations);
113  // set absolute argument tolearance
114  nlopt_set_xtol_abs1(opt, m_variable_tolerance);
115  nlopt_set_ftol_abs(opt, m_function_tolerance);
116 
117  nlopt_set_min_objective(opt, CNLOPTMinimizer::nlopt_function, this);
118 
119 #endif
120  // the minimum objective value, upon return
121  double cost=0.0;
122 
123 #ifdef HAVE_NLOPT
124  // optimize our function
125  nlopt_result error_code=nlopt_optimize(opt, m_target_variable.vector, &cost);
126  if(error_code!=1)
127  {
128  SG_SWARNING("Error(s) happened and NLopt failed during minimization (error code:%d)\n",
129  error_code);
130  }
131 
132  // clean up
133  nlopt_destroy(opt);
134 #endif
135 
136  return cost;
137 }
138 
139 #ifdef HAVE_NLOPT
/** Translate Shogun's ENLOPTALGORITHM enumerator into the corresponding
 * raw NLopt algorithm id, stored as int16_t so it can be registered as a
 * plain serializable parameter (see init()).
 *
 * @param method Shogun-side algorithm enumerator
 * @return the matching NLOPT_* constant cast to int16_t
 *
 * Falls through with method_id==-1 for an unrecognized enumerator; the
 * REQUIRE after the switch then raises "Unsupported algorithm". */
int16_t CNLOPTMinimizer::get_nlopt_algorithm_id(ENLOPTALGORITHM method)
{
	int16_t method_id=-1;
	switch(method)
	{
	case GN_DIRECT:
		method_id = (int16_t) NLOPT_GN_DIRECT;
		break;
	case GN_DIRECT_L:
		method_id = (int16_t) NLOPT_GN_DIRECT_L;
		break;
	case GN_DIRECT_L_RAND:
		method_id = (int16_t) NLOPT_GN_DIRECT_L_RAND;
		break;
	case GN_DIRECT_NOSCAL:
		method_id = (int16_t) NLOPT_GN_DIRECT_NOSCAL;
		break;
	case GN_DIRECT_L_NOSCAL:
		method_id = (int16_t) NLOPT_GN_DIRECT_L_NOSCAL;
		break;
	case GN_DIRECT_L_RAND_NOSCAL:
		method_id = (int16_t) NLOPT_GN_DIRECT_L_RAND_NOSCAL;
		break;
	case GN_ORIG_DIRECT:
		method_id = (int16_t) NLOPT_GN_ORIG_DIRECT;
		break;
	case GN_ORIG_DIRECT_L:
		method_id = (int16_t) NLOPT_GN_ORIG_DIRECT_L;
		break;
	case GN_CRS2_LM:
		method_id = (int16_t) NLOPT_GN_CRS2_LM;
		break;
	case GN_ISRES:
		method_id = (int16_t) NLOPT_GN_ISRES;
		break;
	case LD_MMA:
		method_id = (int16_t) NLOPT_LD_MMA;
		break;
	case LD_LBFGS:
		method_id = (int16_t) NLOPT_LD_LBFGS;
		break;
	case LD_LBFGS_NOCEDAL:
		method_id = (int16_t) NLOPT_LD_LBFGS_NOCEDAL;
		break;
	case LD_VAR1:
		method_id = (int16_t) NLOPT_LD_VAR1;
		break;
	case LD_VAR2:
		method_id = (int16_t) NLOPT_LD_VAR2;
		break;
	case LD_TNEWTON:
		method_id = (int16_t) NLOPT_LD_TNEWTON;
		break;
	case LD_TNEWTON_RESTART:
		method_id = (int16_t) NLOPT_LD_TNEWTON_RESTART;
		break;
	case LD_TNEWTON_PRECOND:
		method_id = (int16_t) NLOPT_LD_TNEWTON_PRECOND;
		break;
	case LD_TNEWTON_PRECOND_RESTART:
		method_id = (int16_t) NLOPT_LD_TNEWTON_PRECOND_RESTART;
		break;
	case LD_SLSQP:
		method_id = (int16_t) NLOPT_LD_SLSQP;
		break;
	case LN_PRAXIS:
		method_id = (int16_t) NLOPT_LN_PRAXIS;
		break;
	case LN_COBYLA:
		method_id = (int16_t) NLOPT_LN_COBYLA;
		break;
	case LN_NEWUOA:
		method_id = (int16_t) NLOPT_LN_NEWUOA;
		break;
	case LN_NEWUOA_BOUND:
		method_id = (int16_t) NLOPT_LN_NEWUOA_BOUND;
		break;
	case LN_NELDERMEAD:
		method_id = (int16_t) NLOPT_LN_NELDERMEAD;
		break;
	case LN_SBPLX:
		method_id = (int16_t) NLOPT_LN_SBPLX;
		break;
	case LN_BOBYQA:
		method_id = (int16_t) NLOPT_LN_BOBYQA;
		break;
	case AUGLAG:
		method_id = (int16_t) NLOPT_AUGLAG;
		break;
	case AUGLAG_EQ:
		method_id = (int16_t) NLOPT_AUGLAG_EQ;
		break;
	case G_MLSL:
		method_id = (int16_t) NLOPT_G_MLSL;
		break;
	case G_MLSL_LDS:
		method_id = (int16_t) NLOPT_G_MLSL_LDS;
		break;
	};
	REQUIRE(method_id>=0, "Unsupported algorithm\n");
	return method_id;
}
242 
243 void CNLOPTMinimizer::set_nlopt_parameters(ENLOPTALGORITHM algorithm,
244  float64_t max_iterations,
245  float64_t variable_tolerance,
246  float64_t function_tolerance)
247 {
248  m_nlopt_algorithm_id=get_nlopt_algorithm_id(algorithm);
249  m_max_iterations=max_iterations;
250  m_variable_tolerance=variable_tolerance;
251  m_function_tolerance=function_tolerance;
252 };
253 
/** Static callback handed to nlopt_set_min_objective().
 *
 * @param dim number of variables (must match m_target_variable.vlen)
 * @param variable NLopt's current candidate point
 * @param gradient output buffer for the gradient at `variable`
 * @param func_data the CNLOPTMinimizer instance (passed as `this`)
 * @return the cost at `variable`
 *
 * The cost function evaluates at whatever m_target_variable currently
 * holds, so NLopt's candidate must be put there first. Instead of
 * copying, the two buffers are swapped in place (and swapped back at the
 * end), which temporarily mutates NLopt's `variable` via const_cast —
 * safe only because the second swap restores it before returning. */
double CNLOPTMinimizer::nlopt_function(unsigned dim, const double* variable, double* gradient,
	void* func_data)
{
	CNLOPTMinimizer* obj_prt=static_cast<CNLOPTMinimizer *>(func_data);
	REQUIRE(obj_prt, "The instance object passed to NLopt optimizer should not be NULL\n");
	REQUIRE((index_t)dim==(obj_prt->m_target_variable).vlen, "Length must be matched\n");

	// swap NLopt's candidate into m_target_variable (restored below)
	double *var = const_cast<double *>(variable);
	std::swap_ranges(var, var+dim, (obj_prt->m_target_variable).vector);

	double cost=obj_prt->m_fun->get_cost();

	//get the gradient wrt variable_new
	SGVector<float64_t> grad=obj_prt->m_fun->get_gradient();

	REQUIRE(grad.vlen==(index_t)dim,
		"The length of gradient (%d) and the length of variable (%d) do not match\n",
		grad.vlen,dim);

	std::copy(grad.vector,grad.vector+dim,gradient);

	// undo the swap so `variable` and m_target_variable are both restored
	std::swap_ranges(var, var+dim, (obj_prt->m_target_variable).vector);
	return cost;
}
278 
279 void CNLOPTMinimizer::init_minimization()
280 {
281  REQUIRE(m_fun, "Cost function not set!\n");
282  m_target_variable=m_fun->obtain_variable_reference();
283  REQUIRE(m_target_variable.vlen>0,"Target variable from cost function must not empty!\n");
284 }
285 #endif
286 
287 #endif //USE_GPL_SHOGUN
virtual SGVector< float64_t > get_upper_bound()=0
int32_t index_t
Definition: common.h:72
#define SG_SWARNING(...)
Definition: SGIO.h:177
#define REQUIRE(x,...)
Definition: SGIO.h:205
The first order cost function base class with bound constrains.
double float64_t
Definition: common.h:60
The first order cost function base class.
all of classes and functions are contained in the shogun namespace
Definition: class_list.h:18
virtual SGVector< float64_t > get_lower_bound()=0
#define SG_ADD(...)
Definition: SGObject.h:94
index_t vlen
Definition: SGVector.h:545
The first order minimizer base class.

SHOGUN Machine Learning Toolbox - Documentation