OptimizationSolver.cpp (SHOGUN 6.0.0)
/*
 * Copyright (c) The Shogun Machine Learning Toolbox
 * Written (w) 2012 - 2013 Heiko Strathmann
 * Written (w) 2014 - 2017 Soumyajit De
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are those
 * of the authors and should not be interpreted as representing official policies,
 * either expressed or implied, of the Shogun Development Team.
 */

#include <functional>
#include <algorithm>
#include <numeric>
#include <vector>
#include <shogun/io/SGIO.h>
#include <shogun/lib/SGVector.h>
#include <shogun/lib/SGMatrix.h>
#include <shogun/mathematics/Math.h>
#include <shogun/statistical_testing/internals/OptimizationSolver.h>

//#ifdef USE_GPL_SHOGUN
#include <shogun/lib/external/libqp.h>
//#endif // USE_GPL_SHOGUN

using namespace shogun;
using namespace internal;

struct OptimizationSolver::Self
{
    Self(SGVector<float64_t> mmds, SGMatrix<float64_t> Q);
//#ifdef USE_GPL_SHOGUN
    SGVector<float64_t> solve() const;
    void init();
    static const float64_t* get_Q_col(uint32_t i);
    static void print_state(libqp_state_T state);

    index_t opt_max_iterations;
    float64_t opt_epsilon;
    float64_t opt_low_cut;
    SGVector<float64_t> m_mmds;
    static SGMatrix<float64_t> m_Q;
//#endif // USE_GPL_SHOGUN
};
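// Implementation note: libqp reads Q through a plain function pointer that receives
// only a column index and carries no user data, so get_Q_col() must be static and
// the matrix it reads is kept in the static member m_Q. Self is the private
// implementation that OptimizationSolver owns through a std::unique_ptr.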

//#ifdef USE_GPL_SHOGUN
SGMatrix<float64_t> OptimizationSolver::Self::m_Q=SGMatrix<float64_t>();
//#endif // USE_GPL_SHOGUN

OptimizationSolver::Self::Self(SGVector<float64_t> mmds, SGMatrix<float64_t> Q)
{
//#ifdef USE_GPL_SHOGUN
    m_Q=Q;
    m_mmds=mmds;
    init();
//#endif // USE_GPL_SHOGUN
}

//#ifdef USE_GPL_SHOGUN
void OptimizationSolver::Self::init()
{
    opt_max_iterations=10000;
    opt_epsilon=1E-14;
    opt_low_cut=1E-6;
}

// Q is stored column-major, so column i starts at offset num_rows*i
const float64_t* OptimizationSolver::Self::get_Q_col(uint32_t i)
{
    return &m_Q[m_Q.num_rows*i];
}

void OptimizationSolver::Self::print_state(libqp_state_T state)
{
    SG_SDEBUG("libqp state: primal=%f\n", state.QP);
}

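// solve() sets up the following quadratic program for libqp's generalized SMO
// solver (see the arguments passed to libqp_gsmo_solver below):
//
//     minimize    (1/2) * w' * Q * w
//     subject to  m_mmds' * w = 1,   w >= 0
//
// The resulting w is then thresholded and rescaled to sum to one.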
SGVector<float64_t> OptimizationSolver::Self::solve() const
{
    const index_t num_kernels=m_mmds.size();
    // use a float64_t initial value, otherwise std::accumulate truncates every step to int
    float64_t sum_m_mmds=std::accumulate(m_mmds.data(), m_mmds.data()+m_mmds.size(), 0.0);
    SGVector<float64_t> weights(num_kernels);
    if (std::any_of(m_mmds.data(), m_mmds.data()+m_mmds.size(), [](float64_t& value) { return value > 0; }))
    {
        SG_SDEBUG("At least one MMD entry is positive, performing optimisation\n")

        std::vector<float64_t> Q_diag(num_kernels);
        std::vector<float64_t> f(num_kernels, 0);
        std::vector<float64_t> lb(num_kernels, 0);
        std::vector<float64_t> ub(num_kernels, CMath::INFTY);

        // initial point has to be feasible, i.e. m_mmds'*x = b
        std::fill(weights.data(), weights.data()+weights.size(), 1.0/sum_m_mmds);
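        // (with every entry equal to 1/sum(m_mmds), the equality constraint
        // m_mmds'*x evaluates exactly to 1, which is the b passed to libqp below)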

        for (index_t i=0; i<num_kernels; ++i)
            Q_diag[i]=m_Q(i,i);

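        // libqp_gsmo_solver arguments, in order: column accessor and diagonal of Q,
        // linear term f (all zeros), equality-constraint vector a=m_mmds with b=1,
        // box constraints lb=0 and ub=+inf, the weight vector optimised in place,
        // the problem size, the iteration cap and tolerance, and a progress callback.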
        SG_SDEBUG("starting libqp optimization\n");
        libqp_state_T qp_exitflag=libqp_gsmo_solver(&OptimizationSolver::Self::get_Q_col,
            Q_diag.data(),
            f.data(),
            m_mmds.data(),
            1,
            lb.data(),
            ub.data(),
            weights.data(),
            num_kernels,
            opt_max_iterations,
            opt_epsilon,
            &OptimizationSolver::Self::print_state);

        SG_SDEBUG("libqp returns: nIts=%d, exit_flag: %d\n", qp_exitflag.nIter, qp_exitflag.exitflag);
        // the static Q matrix is no longer needed, release it
        m_Q=SGMatrix<float64_t>();

        // set really small entries to zero and sum up for normalization
        float64_t sum_weights=0;
        for (index_t i=0; i<weights.vlen; ++i)
        {
            if (weights[i]<opt_low_cut)
            {
                SG_SDEBUG("lowcut: weight[%i]=%f<%f setting to zero\n", i, weights[i], opt_low_cut);
                weights[i]=0;
            }
            sum_weights+=weights[i];
        }

        // normalize (allowed since problem is scale invariant)
        std::for_each(weights.data(), weights.data()+weights.size(), [&sum_weights](float64_t& weight)
        {
            weight/=sum_weights;
        });
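        // (the rescaling above is safe: multiplying all weights by the same positive
        // constant scales w'Qw by its square and m_mmds'*w by the constant itself,
        // so the relative weights are preserved)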
    }
    else
    {
        SG_SWARNING("All mmd estimates are negative. This is technically possible, "
            "although extremely rare. Consider using different kernels. "
            "This combination will lead to a bad two-sample test. Since any "
            "combination is bad, will now just return equally distributed "
            "kernel weights\n");

        // if no element is positive, we can choose arbitrary weights since
        // the results will be bad anyway
        std::fill(weights.data(), weights.data()+weights.size(), 1.0/num_kernels);
    }
    return weights;
}
//#endif // USE_GPL_SHOGUN

OptimizationSolver::OptimizationSolver(const SGVector<float64_t>& mmds, const SGMatrix<float64_t>& Q)
{
    self=std::unique_ptr<Self>(new Self(mmds, Q));
}

// defined out of line so that std::unique_ptr<Self> sees the complete Self type
OptimizationSolver::~OptimizationSolver()
{
}

SGVector<float64_t> OptimizationSolver::solve() const
{
//#ifdef USE_GPL_SHOGUN
    return self->solve();
//#else // USE_GPL_SHOGUN
//  SG_SWARNING("Presently this feature is only available with GNU GPLv3 license!");
//  return SGVector<float64_t>();
//#endif // USE_GPL_SHOGUN
}
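
Below is a minimal, illustrative usage sketch, written as a separate program rather than as part of this file. It assumes the class is declared in shogun/statistical_testing/internals/OptimizationSolver.h (the 6.0.0 layout) and uses a toy three-kernel problem with an identity Q. The constructor takes the per-kernel MMD estimates first and the Q matrix second; solve() returns non-negative kernel weights that sum to one.

#include <shogun/base/init.h>
#include <shogun/lib/SGVector.h>
#include <shogun/lib/SGMatrix.h>
#include <shogun/statistical_testing/internals/OptimizationSolver.h> // assumed header location

using namespace shogun;
using namespace shogun::internal;

int main()
{
    init_shogun_with_defaults();

    // toy problem: three kernels with positive MMD estimates and identity covariance Q
    SGVector<float64_t> mmds(3);
    mmds[0]=0.1;
    mmds[1]=0.3;
    mmds[2]=0.2;

    SGMatrix<float64_t> Q(3, 3);
    Q.set_const(0.0);
    for (index_t i=0; i<3; ++i)
        Q(i, i)=1.0;

    OptimizationSolver solver(mmds, Q);
    SGVector<float64_t> weights=solver.solve(); // non-negative, normalised to sum to one
    weights.display_vector("kernel weights");

    exit_shogun();
    return 0;
}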