// Fragments from shogun's KernelMachine.cpp (the CKernelMachine implementation).

// Internal per-thread parameter struct, hidden from the generated API docs:
#ifndef DOXYGEN_SHOULD_SKIP_THIS
struct S_THREAD_PARAM_KERNEL_MACHINE
{
    ...
};
#endif // DOXYGEN_SHOULD_SKIP_THIS

// In the constructors, the model size is derived from the length of the
// support-vector index vector:
int32_t num_sv=svs.vlen;
...
int32_t num_sv = svs.vlen;
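The body of S_THREAD_PARAM_KERNEL_MACHINE itself is hidden from the generated documentation. Inferred purely from the fields assigned in the fragments further down, it plausibly contains the following members (member order and exact types are assumptions, not the verbatim Shogun definition):

struct S_THREAD_PARAM_KERNEL_MACHINE
{
    CKernelMachine* kernel_machine; // object whose apply_one() is called
    float64_t* result;              // shared output buffer
    int32_t start;                  // first index handled by this thread (inclusive)
    int32_t end;                    // one past the last index (exclusive)
    index_t* indices;               // optional index mapping, NULL to use 0..N-1
    index_t indices_len;            // length of that mapping
    bool verbose;                   // whether this slice reports progress
};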
// CKernelMachine::init_kernel_optimization(): collect the support-vector
// indices and their weights, then initialize the kernel's linadd optimization.
int32_t* sv_idx = SG_MALLOC(int32_t, num_sv);
...
for (int32_t i=0; i<num_sv; i++)
...
// The same error is raised on two separate failure paths:
SG_ERROR("initialization of kernel optimization failed\n")
...
SG_ERROR("initialization of kernel optimization failed\n")
// CKernelMachine::apply_get_outputs(): entry trace and sanity checks.
SG_DEBUG("entering %s::apply_get_outputs(%s at %p)\n", ...

REQUIRE(kernel, "%s::apply_get_outputs(): No kernel assigned!\n")
...
SG_ERROR("%s: No vectors on left hand side (%s). This is probably due to"
        " an implementation error in %s, where it was forgotten to set "
        "the data (m_svs) indices\n", get_name(), ...
...
REQUIRE(lhs, "%s::apply_get_outputs(): No left hand side specified\n", ...
...
SG_DEBUG("computing output on %d test examples\n", num_vectors)
...

// Fast path: when batch computation is enabled, the kernel scores all test
// examples in a single compute_batch() call over an explicit index array.
SG_DEBUG("Batch evaluation enabled\n")
...
int32_t* idx=SG_MALLOC(int32_t, num_vectors);
...
for (int32_t i=0; i<num_vectors; i++)
...
        output.vector, get_num_support_vectors(), sv_idx, sv_weight);
...
for (int32_t i=0; i<num_vectors; i++)
...
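Given the compute_batch() declaration in the reference list below, the truncated call above plausibly has the following shape. Treat this as a sketch: the bias accessor get_bias() is an assumption that mirrors the listed set_bias(), and sv_idx/sv_weight are assumed to have been filled from the support vectors as in init_kernel_optimization().

for (int32_t i=0; i<num_vectors; i++)
    idx[i] = i;                              // score every test example

kernel->compute_batch(num_vectors, idx,
        output.vector, get_num_support_vectors(), sv_idx, sv_weight);

for (int32_t i=0; i<num_vectors; i++)
    output[i] += get_bias();                 // assumed bias correction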
// Slow path: score the test examples one by one via apply_helper(), either
// directly in the calling thread or split across pthreads.
int32_t num_threads=1;
...

// Single-threaded case: one parameter block covering the whole range.
S_THREAD_PARAM_KERNEL_MACHINE params;
params.kernel_machine=this;
params.result = output.vector;
...
params.end=num_vectors;
...
params.indices = NULL;
params.indices_len = 0;
...

// Multi-threaded case: each worker gets a chunk of size step; the calling
// thread keeps the last chunk (including the integer-division remainder)
// and joins the workers afterwards.
pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
S_THREAD_PARAM_KERNEL_MACHINE* params = SG_MALLOC(S_THREAD_PARAM_KERNEL_MACHINE, num_threads);
int32_t step= num_vectors/num_threads;
...
for (t=0; t<num_threads-1; t++)
{
    params[t].kernel_machine = this;
    params[t].result = output.vector;
    params[t].start = t*step;
    params[t].end = (t+1)*step;
    params[t].verbose = false;
    params[t].indices = NULL;
    params[t].indices_len = 0;
    pthread_create(&threads[t], NULL, ...);  // worker entry point: apply_helper(), see below
}

// The calling thread handles the remaining examples itself and is the only
// one that reports progress:
params[t].kernel_machine = this;
params[t].result = output.vector;
params[t].start = t*step;
params[t].end = num_vectors;
params[t].verbose = true;
params[t].indices = NULL;
params[t].indices_len = 0;
...

for (t=0; t<num_threads-1; t++)
    pthread_join(threads[t], NULL);
...

SG_INFO("prematurely stopped. \n")
...
SG_DEBUG("leaving %s::apply_get_outputs(%s at %p)\n", ...
// CKernelMachine::apply_helper(): per-thread worker. It walks its assigned
// [start, end) range, optionally reports coarse progress, maps the loop index
// through params->indices if a mapping was supplied, and scores each example
// with apply_one().
S_THREAD_PARAM_KERNEL_MACHINE* params = (S_THREAD_PARAM_KERNEL_MACHINE*) p;
...
// Plain loop variant ...
for (int32_t vec=params->start; vec<params->end; vec++)
// ... or, where signal handling is available, a variant whose condition also
// checks for a cancellation request (cf. cancel_computations() below):
for (int32_t vec=params->start; vec<params->end &&
        ...
{
    // Coarse progress reporting, roughly once per percent of the slice:
    int32_t num_vectors=params->end - params->start;
    int32_t v=vec-params->start;
    if ( (v% (num_vectors/100+1))== 0)
        ...

    // Optional index mapping, then the actual evaluation:
    index_t idx=params->indices ? params->indices[vec] : vec;
    result[vec] = kernel_machine->apply_one(idx);
}
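apply_one(idx) is the scalar decision function of the kernel machine. A minimal sketch of what it evaluates, using only accessors from the reference list below (get_bias() is assumed to mirror the listed set_bias(); the real implementation may instead route through the optimized kernel paths set up earlier):

// f(x_idx) = sum_i alpha_i * k(sv_i, x_idx) + b   -- sketch only
float64_t score = get_bias();   // assumed accessor
for (int32_t i=0; i<get_num_support_vectors(); i++)
    score += get_alpha(i) * kernel->kernel(get_support_vector(i), idx);
return score;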
// CKernelMachine::store_model_features(): both the kernel and its left-hand
// side features must exist before the support-vector features can be copied.
SG_ERROR("kernel is needed to store SV features.\n")
...
SG_ERROR("kernel lhs is needed to store SV features.\n")
...

// The locked code paths refuse to run unless data_lock() was called first:
SG_ERROR("CKernelMachine::train_locked() call data_lock() before!\n")
...
SG_ERROR("CKernelMachine::apply_locked() call data_lock() before!\n")
// CKernelMachine::apply_locked_get_output(): the same single-/multi-threaded
// split as in apply_get_outputs(), except that the workers walk the supplied
// index vector rather than 0..num_vectors-1.
int32_t num_inds=indices.vlen;
...
int32_t num_threads=1;
...

S_THREAD_PARAM_KERNEL_MACHINE params;
params.kernel_machine=this;
params.result=output.vector;
...
params.indices=indices.vector;
params.indices_len=indices.vlen;
...

pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
S_THREAD_PARAM_KERNEL_MACHINE* params=SG_MALLOC(S_THREAD_PARAM_KERNEL_MACHINE, num_threads);
int32_t step= num_inds/num_threads;
...
for (t=0; t<num_threads-1; t++)
{
    params[t].kernel_machine=this;
    params[t].result=output.vector;
    ...
    params[t].start=t*step;
    params[t].end=(t+1)*step;
    params[t].indices=indices.vector;
    params[t].indices_len=indices.vlen;
    ...
    params[t].verbose=false;
    ...
}

// The calling thread takes the remaining indices and reports progress:
params[t].kernel_machine=this;
params[t].result=output.vector;
...
params[t].start=t*step;
params[t].end=num_inds;
params[t].indices=indices.vector;
params[t].indices_len=indices.vlen;
...
params[t].verbose=true;
...

for (t=0; t<num_threads-1; t++)
    pthread_join(threads[t], NULL);
...
SG_INFO("prematurely stopped.\n")
// CKernelMachine::data_lock(): locking requires an initialized kernel and is
// not yet available for combined kernels.
SG_ERROR("The kernel is not initialized\n")
...
SG_ERROR("Locking is not supported (yet) with combined kernel. Please disable it in cross validation")
...

void CKernelMachine::init()
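Putting the pieces together, a hypothetical end-to-end usage sketch of the apply machinery above, written against the Shogun 3.x/4.x C++ API (the header paths, constructor arguments, and the choice of CLibSVM as the concrete CKernelMachine subclass are assumptions and may differ between versions):

#include <shogun/base/init.h>
#include <shogun/lib/SGMatrix.h>
#include <shogun/lib/SGVector.h>
#include <shogun/features/DenseFeatures.h>
#include <shogun/labels/BinaryLabels.h>
#include <shogun/kernel/GaussianKernel.h>
#include <shogun/classifier/svm/LibSVM.h>

using namespace shogun;

int main()
{
    init_shogun_with_defaults();

    // Four 2-d toy examples, stored column-wise.
    SGMatrix<float64_t> X(2, 4);
    X(0,0)=-1; X(1,0)=-1;  X(0,1)=-2; X(1,1)=-1;
    X(0,2)= 1; X(1,2)= 1;  X(0,3)= 2; X(1,3)= 1;

    SGVector<float64_t> y(4);
    y[0]=-1; y[1]=-1; y[2]=1; y[3]=1;

    CDenseFeatures<float64_t>* feats = new CDenseFeatures<float64_t>(X);
    CBinaryLabels* labels = new CBinaryLabels(y);

    CGaussianKernel* kernel = new CGaussianKernel(10, 2.0); // cache size, width
    CLibSVM* svm = new CLibSVM(1.0, kernel, labels);        // C, kernel, labels
    svm->train(feats);

    // Goes through CKernelMachine::apply_get_outputs() shown above and
    // thresholds the resulting scores.
    CBinaryLabels* predictions = svm->apply_binary(feats);
    predictions->get_labels().display_vector("predictions");

    SG_UNREF(predictions);
    SG_UNREF(svm);
    exit_shogun();
    return 0;
}

The declarations that follow list the members and related classes referenced by this file.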
virtual float64_t apply_one(int32_t num)
virtual const char * get_name() const =0
SGVector< float64_t > apply_get_outputs(CFeatures *data)
virtual bool init(CFeatures *lhs, CFeatures *rhs)
SGVector< int32_t > m_svs
int32_t get_num_support_vectors()
void set_bias_enabled(bool enable_bias)
virtual CBinaryLabels * apply_locked_binary(SGVector< index_t > indices)
Real Labels are real-valued labels.
virtual void add_row_subset(SGVector< index_t > subset)
bool get_show_progress() const
The class Labels models labels, i.e. class assignments of objects.
The Custom Kernel allows for custom user provided kernel matrices.
int32_t get_num_threads() const
virtual CRegressionLabels * apply_regression(CFeatures *data=NULL)
SGVector< int32_t > get_support_vectors()
virtual int32_t get_num_vectors() const =0
CCustomKernel * m_custom_kernel
float64_t kernel(int32_t idx_a, int32_t idx_b)
static void * apply_helper(void *p)
virtual bool train_machine(CFeatures *data=NULL)
CKernel * m_kernel_backup
A generic KernelMachine interface.
bool is_data_locked() const
virtual int32_t get_num_vec_lhs()
virtual void remove_all_row_subsets()
A generic learning machine interface.
bool get_is_initialized()
void set_support_vectors(SGVector< int32_t > svs)
virtual bool train_locked(SGVector< index_t > indices)
SGVector< float64_t > m_alpha
virtual void remove_col_subset()
bool has_property(EKernelProperty p)
virtual void add_col_subset(SGVector< index_t > subset)
virtual void store_model_features()
Class SGObject is the base class of all shogun objects.
bool get_batch_computation_enabled()
void set_bias(float64_t bias)
void set_batch_computation_enabled(bool enable)
static void clear_cancel()
virtual SGVector< float64_t > apply_locked_get_output(SGVector< index_t > indices)
bool set_alpha(int32_t idx, float64_t val)
virtual void data_unlock()
virtual void data_lock(CLabels *labs, CFeatures *features)
virtual void remove_subset()
void range_fill(T start=0)
virtual float64_t compute_optimized(int32_t vector_idx)
float64_t get_alpha(int32_t idx)
bool use_batch_computation
virtual void add_subset(SGVector< index_t > subset)
bool set_support_vector(int32_t idx, int32_t val)
bool init_kernel_optimization()
int32_t get_support_vector(int32_t idx)
static bool cancel_computations()
virtual bool supports_locking() const
virtual int32_t get_num_vec_rhs()
SGVector< float64_t > get_alphas()
All classes and functions are contained in the shogun namespace.
bool get_linadd_enabled()
virtual void compute_batch(int32_t num_vec, int32_t *vec_idx, float64_t *target, int32_t num_suppvec, int32_t *IDX, float64_t *alphas, float64_t factor=1.0)
virtual bool init_optimization(int32_t count, int32_t *IDX, float64_t *weights)
virtual CFeatures * copy_subset(SGVector< index_t > indices)
virtual CRegressionLabels * apply_locked_regression(SGVector< index_t > indices)
void set_alphas(SGVector< float64_t > alphas)
The class Features is the base class of all feature objects.
#define SG_SPROGRESS(...)
void set_linadd_enabled(bool enable)
Binary Labels for binary classification.
void set_kernel(CKernel *k)
virtual ~CKernelMachine()
virtual const char * get_name() const
SGVector< T > clone() const
virtual CBinaryLabels * apply_binary(CFeatures *data=NULL)
bool create_new_model(int32_t num)
virtual void data_lock(CLabels *labs, CFeatures *features=NULL)