UFJF - Machine Learning Toolkit  0.51.8
IMA.hpp
Go to the documentation of this file.
1 
6 //
7 // Created by mateus558 on 01/08/17.
8 //
9 
10 #ifndef CLASSIFICATION_ALGORITHMS_SYSTEM_IMA_HPP
11 #define CLASSIFICATION_ALGORITHMS_SYSTEM_IMA_HPP
12 
13 #include <vector>
14 #include <float.h>
15 
16 #include "PrimalClassifier.hpp"
17 #include "DualClassifier.hpp"
18 #include "Perceptron.hpp"
19 
20 namespace mltk{
21  namespace classifier {
 /**
  * \brief Wrapper for the implementation of the Incremental Margin Algorithm primal (IMAp).
  */
 25  template<typename T = double>
 26  class IMAp : public PrimalClassifier<T> {
 27  private:
 // Margin value requested for training (0 = run until MAX_UP updates).
 28  double margin;
 // Indexes of the support vectors found during training.
 30  std::vector<int> svs;
 31 
 32  public:
 33  IMAp() = default;
 34 
 /// \param samples Training data. \param q Norm used by the classifier (Euclidean by default).
 /// \param flexible Flexibility (soft-margin slack weight). \param margin Desired margin.
 /// \param initial_solution Optional warm-start solution (w and bias are copied).
 35  explicit IMAp(const Data <T>& samples, int q = 2, double flexible = 0.0, double margin = 0.0,
 36  Solution* initial_solution = nullptr);
 37 
 /// Executes the training phase; returns false when convergence is not reached.
 38  bool train() override;
 39 
 /// Classifies a point (+1/-1) or returns the raw functional value when raw_value is true.
 40  double evaluate(const Point <T> &p, bool raw_value = false) override;
 41 
 /// Get the indexes of the support vectors.
 46  std::vector<int> getSupportVectors() { return svs; }
 47  };
48 
 /**
  * \brief Wrapper for the implementation of the Incremental Margin Algorithm primal with fixed margin.
  */
 52  template<typename T = double>
 53  class IMApFixedMargin : public PrimalClassifier<T> {
 54  private:
 // n / maiorn / maiorw: bookkeeping for the L1 (q == -1) weight-update case
 // ("maior" is Portuguese for "largest"). flagNao1aDim ("not first dim" flag)
 // switches train() to the tMax-bounded stopping criterion.
 55  int n, maiorn = 0, flagNao1aDim;
 56  double margin = 0.0, maiorw = 0.0;
 // Update budget used as a stop criterion when flagNao1aDim is set.
 57  unsigned long tMax;
 58 
 // Indexes of the support vectors.
 60  std::vector<int> svs;
 61 
 62  public:
 63  IMApFixedMargin() = default;
 /// \param samples Training data. \param gamma Fixed margin. \param initial_solution Optional warm start.
 64  explicit IMApFixedMargin(const mltk::Data<T>& samples, double gamma = 0,
 65  Solution *initial_solution = nullptr);
 66 
 /// Executes the fixed-margin perceptron training phase.
 67  bool train() override;
 68 
 /// Classifies a point (+1/-1) or returns the raw functional value when raw_value is true.
 69  double evaluate(const Point <T> &p, bool raw_value = false) override;
 70 
 // NOTE(review): these expose raw pointers to private state so IMAp<T>::train()
 // can write into them directly; they are mutation hooks, not plain getters.
 71  inline int *getFlagNot1aDim() { return &flagNao1aDim; }
 72 
 73  inline unsigned long *gettMax() { return &tMax; }
 74  };
75 
 /**
  * \brief Incremental Margin Algorithm in the dual (kernelized) formulation.
  */
 76  template<typename T = double>
 77  class IMADual : public DualClassifier<T> {
 78  private:
 // Margin found by training (set indirectly through solution.margin).
 79  double margin = 0;
 // Indexes of the support vectors found during training.
 81  std::vector<int> svs;
 82  public:
 83  IMADual() = default;
 /// Construct without data; samples must be set before train() is called.
 84  explicit IMADual(KernelType kernel_type, double kernel_param=0,
 85  double rate = 1, Solution* initial_solution = nullptr);
 /// Construct with training data, kernel configuration and learning rate.
 86  explicit IMADual(const Data<T>& samples, KernelType kernel_type=KernelType::INNER_PRODUCT, double kernel_param=0,
 87  double rate = 1, Solution* initial_solution = nullptr);
 88 
 /// Executes the dual training phase; returns false when convergence is not reached.
 89  bool train() override;
 /// Get the indexes of support vectors.
 94  std::vector<int> getSupportVectors() { return svs; }
 95  };
96 
97  template<typename T>
98  IMAp<T>::IMAp(const Data<T> &samples, int q, double flexible, double margin, Solution *initial_solution) {
99  this->samples = mltk::make_data<T>(samples);
100  this->margin = margin;
101  this->flexible = flexible;
102  this->q = q;
103 
104  this->hasInitialSolution = false;
105 
106  if (initial_solution) {
107  this->solution.w = initial_solution->w;
108  this->solution.bias = initial_solution->bias;
109  this->hasInitialSolution = true;
110  } else {
111  if (this->samples) this->w.resize(this->samples->dim());
112  }
113  }
114 
 /**
  * \brief IMAp training: repeatedly calls the fixed-margin perceptron
  *        (IMApFixedMargin) with an increasing margin gamma until the
  *        perceptron stops converging, keeping the best weights found.
  * \return false when no fixed-margin call converged, true otherwise.
  */
115  template<typename T>
116  bool IMAp<T>::train() {
117  unsigned int tMax = 0;
118  int i, j, n, maiorn = 0, flagNao1aDim = 0, y, it, sign = 1, svs = 0;
119  size_t size = this->samples->size(), dim = this->samples->dim(), t1 = 1, t3 = 1;
120  double gamma = 0.0, secs, bias = 0.0, alpha, rmargin = margin, inc, stime;
121  double min = 0.0, max = 0.0, norm = 1.0, maiorw = 0.0;
122  std::vector<double> w_saved, func;
123  std::vector<int> index = this->samples->getIndex(), fnames = this->samples->getFeaturesNames();
124  auto points = this->samples->points();
125  IMApFixedMargin<T> imapFixMargin;
126  imapFixMargin.setGamma(gamma);
127  Solution tempSol;
128  n = dim;
129  this->rate = 1.0;
130  //Initializing data struct
131  this->solution.norm = 1.0;
132 
133  //Allocating space for w_saved and func
134  w_saved.resize(dim);
135  func.resize(size);
136 
 // Warm start: normalize the initial w (by the q-norm), recenter the bias
 // between the closest positive and negative points, and derive a bound tMax
 // on the number of updates from the data radius.
137  //Allocating space for w
138  if (this->hasInitialSolution) {
139  if (this->q == 1)
140  for (this->solution.norm = 0.0, i = 0; i < dim; ++i)
141  this->solution.norm += fabs(this->solution.w[i]);
142  else if (this->q == 2) {
143  for (this->solution.norm = 0.0, i = 0; i < dim; ++i)
144  this->solution.norm += this->solution.w[i] * this->solution.w[i];
145  this->solution.norm = sqrt(this->solution.norm);
146  } else {
147  for (this->solution.norm = 0.0, i = 0; i < dim; ++i)
148  this->solution.norm += std::pow(fabs(this->solution.w[i]), this->q);
149  this->solution.norm = std::pow(this->solution.norm, 1.0 / this->q);
150  }
151 
152  for (i = 0; i < dim; ++i) this->solution.w[i] /= this->solution.norm;
153 
154  this->solution.bias /= this->solution.norm;
155  this->solution.norm = 1;
156  flagNao1aDim = 1;
157  int flag = 0;
158 
159  for (min = DBL_MAX, max = -DBL_MAX, i = 0; i < size; ++i) {
160  y = points[i]->Y();
161  for (func[i] = 0, j = 0; j < dim; ++j)
162  func[i] += this->w[j] * points[i]->X()[j];
163  if (y == 1 && func[i] < min) min = func[i];
164  else if (y == -1 && func[i] > max) max = func[i];
165  }
166 
167  this->solution.bias = -(min + max) / 2.0;
168 
169  for (min = DBL_MAX, max = -DBL_MAX, i = 0; i < size; ++i) {
170  y = points[i]->Y();
171  for (func[i] = this->solution.bias, j = 0; j < dim; ++j)
172  func[i] += this->solution.w[j] * points[i]->X()[j];
173  if (func[i] * y < 0) flag++;
174  if (y == 1 && func[i] < min) min = func[i];
175  else if (y == -1 && func[i] > max) max = func[i];
176  }
177 
178  if (flag) rmargin = 0;
179  else rmargin = fabs(min);
180 
181  if (margin == 0) tMax = this->MAX_UP;
182  else {
183  double raio = mltk::stats::radius(*this->samples, -1, this->q);//data_get_radius(sample, -1, this->q);
184  tMax = (raio * raio - rmargin * rmargin) / std::pow(this->margin - rmargin, 2);
185  if (rmargin == 0) tMax *= 1.5;
186  }
187 
188  *imapFixMargin.gettMax() = tMax;
189  }
190 
191  //Allocating space for index and initializing
192  if (index.size() == 0) {
193  index.resize(size);
194  for (i = 0; i < size; ++i) index[i] = i;
195  }
196  this->samples->setIndex(index);
197  //this->samples->setIndex(index);
198 
199  //Initializing alpha
200  for (i = 0; i < size; ++i) { (*this->samples)[i]->Alpha() = 0.0; }
201  if (this->verbose) {
202  std::cout << "-----------------------------------------------------------------------------\n";
203  std::cout << " pmf steps updates margin norm secs\n";
204  std::cout << "-----------------------------------------------------------------------------\n";
205  }
206 
207  it = 0;
208  this->ctot = 0;
209  this->steps = 0;
210  gamma = 0.0;
211 
 // Configure the fixed-margin sub-solver with the shared state.
212  imapFixMargin.setSamples(this->samples);
213  imapFixMargin.setCtot(this->ctot);
214  imapFixMargin.setqNorm(this->q);
215  imapFixMargin.setSteps(this->steps);
216  imapFixMargin.setGamma(gamma);
217  imapFixMargin.setFlexible(this->flexible);
218  imapFixMargin.setLearningRate(this->rate);
219  if (this->hasInitialSolution) {
220  imapFixMargin.setSolution(this->solution);
221  }
222  imapFixMargin.setMaxUpdates(this->MAX_UP);
223  imapFixMargin.setMaxIterations(this->MAX_IT);
224  imapFixMargin.setMaxTime(this->max_time);
225  this->timer.reset();
226  stime = this->timer.elapsed();
227  imapFixMargin.setStartTime(stime);
228  *imapFixMargin.getFlagNot1aDim() = flagNao1aDim;
 // Main IMA loop: each successful fixed-margin run yields a new solution;
 // gamma is then enlarged and the sub-solver is re-run.
229  while (imapFixMargin.train()) {
230  stime += this->timer.elapsed();
231 
232  this->ctot = imapFixMargin.getCtot();
233  this->steps = imapFixMargin.getSteps();
234  //Finding minimum and maximum functional values
235  tempSol = imapFixMargin.getSolution();
236  norm = tempSol.norm;
237  bias = tempSol.bias;
238  func = tempSol.func.X();
239  //std::cout << mltk::Point<double>(func) << std::endl;
240  for (min = DBL_MAX, max = -DBL_MAX, i = 0; i < size; ++i) {
241  y = points[i]->Y();
242  alpha = points[i]->Alpha();
243  if ((func[i] + y * alpha * this->flexible) >= 0 &&
244  min > (func[i] + y * alpha * this->flexible) / norm) {
245  min = (func[i] + y * alpha * this->flexible) / norm;
246  }
247  else if ((func[i] + y * alpha * this->flexible) < 0 &&
248  max < (func[i] + y * alpha * this->flexible) / norm)
249  max = (func[i] + y * alpha * this->flexible) / norm;
250  }
251  //Saving good weights
252  //std::cout << min << " " << max << std::endl;
253  for (i = 0; i < dim; i++) w_saved[i] = tempSol.w[i];
254 
255  //Obtaining real margin
256  rmargin = (fabs(min) > fabs(max)) ? fabs(max) : fabs(min);
257 
258  //Shift in the bias
259  double mmargin = (fabs(max) + fabs(min)) / 2.0;
260  if (fabs(max) > fabs(min))
261  tempSol.bias += fabs(mmargin - rmargin);
262  else
263  tempSol.bias -= fabs(mmargin - rmargin);
264 
265  //Obtaining new gamma_f
266  gamma = (min - max) / 2.0;
267  inc = (1 + this->alpha_aprox) * rmargin;
268  if (gamma < inc) gamma = inc;
269  rmargin = mmargin;
270 
 // NOTE(review): only the t1 assignment is guarded by (it == 2); t3 is
 // updated every iteration. The indentation suggests both might have been
 // intended under the if — confirm against the reference implementation.
271  if (it == 2)
272  t1 = this->ctot;
273  t3 = this->ctot;
274 
275  if (it > 1) {
276  this->rate = sqrt(t1) / sqrt(t3);
277  if (this->verbose) std::cout << "RATE: " << this->rate << "\n";
278  } else if (it == 1 && this->verbose)
279  std::cout << "RATE: " << this->rate << "\n";
280 
281  secs = stime / 1000;
282  if (this->verbose)
283  std::cout << " " << it + 1 << " " << this->steps << " " << this->ctot
284  << " " << rmargin << " " << norm << " " << secs << " ";
285 
286  ++it; //IMA iteration increment
287  //std::cout << tempSol.w << std::endl;
288  imapFixMargin.setGamma(gamma);
289  imapFixMargin.setSolution(tempSol);
290  imapFixMargin.setLearningRate(this->rate);
 // Recompute the support-vector set from the current alphas.
291  this->svs.clear();
292  for (i = 0; i < size; ++i) {
293  y = points[i]->Y();
294  alpha = points[i]->Alpha();
295  if (alpha > this->EPS * this->rate) { this->svs.push_back(i); }
296  }
297  this->steps = imapFixMargin.getSteps();
298  this->ctot = imapFixMargin.getCtot();
299  this->solution.w = w_saved;
300  this->solution.margin = rmargin;
301  this->solution.norm = norm;
302  this->solution.bias = bias;
303  this->solution.svs = this->svs.size();
304  // break;
305  if(it > this->MAX_IT) break;
306  if(stime >= this->max_time) break;
307  if (flagNao1aDim) break;
308  }
 // Final support-vector set and solution (repeats the in-loop bookkeeping so
 // a loop exited via break still publishes consistent state).
309  this->svs.erase(this->svs.begin(), this->svs.end());
310  for(i = 0; i < size; ++i)
311  {
312  y = points[i]->Y();
313  alpha = points[i]->Alpha();
314  if(alpha > this->EPS * this->rate) { this->svs.push_back(i); }
315  }
316 
317  this->steps = imapFixMargin.getSteps();
318  this->ctot = imapFixMargin.getCtot();
319  this->solution.w.clear();
320  this->solution.w = w_saved;
321  this->solution.margin = rmargin;
322  this->solution.norm = norm;
323  this->solution.bias = bias;
324  this->solution.svs = this->svs.size();
325 
326  if (this->verbose) {
327  std::cout << "\n-----------------------------------------------------------------------------\n";
328  std::cout << "Number of times that the Fixed Margin Perceptron was called: " << it + 1 << "\n";
329  std::cout << "Number of steps through data: " << this->steps << "\n";
330  std::cout << "Number of updates: " << this->ctot << "\n";
331  std::cout << "Margin found: " << rmargin << "\n";
332  std::cout << "Min: " << fabs(min) << " / Max: " << fabs(max) << "\n";
333  std::cout << "Number of Support Vectors: " << this->svs.size() << "\n\n";
334  if (this->verbose >= 2) {
335  for (i = 0; i < dim; ++i) std::cout << "W[" << i << "]: " << w_saved[i] << "\n";
336  std::cout << "Bias: " << this->solution.bias << "\n\n";
337  }
338  }
339 
340  this->samples->resetIndex();
341 
342  if (!it) {
343  if (this->verbose) std::cout << "FMP convergency wasn't reached!\n";
344  return false;
345  }
346  return true;
347  }
348 
349  template<typename T>
350  double IMAp<T>::evaluate(const Point<T> &p, bool raw_value) {
351  double func = 0.0;
352  int i;
353  size_t dim = this->solution.w.size();
354 
355  if(p.size() != dim){
356  std::cerr << "The point must have the same dimension of the feature set! (" << p.X().size() << "," << dim << ")" << std::endl;
357  return 0;
358  }
359 
360  for (func = this->solution.bias, i = 0; i < dim; i++) {
361  func += this->solution.w[i] * p[i];
362  }
363  if (!raw_value) return (func >= this->solution.margin * this->solution.norm) ? 1 : -1;
364  else return func;
365  }
366 
367 
368 
369  template<typename T>
370  IMApFixedMargin<T>::IMApFixedMargin(const mltk::Data<T>& samples, double gamma,
371  Solution *initial_solution) {
372  this->gamma = gamma;
373  this->samples = mltk::make_data<T>(samples);
374 
375  if (initial_solution) {
376  this->w = initial_solution->w.X();
377  this->solution.bias = initial_solution->bias;
378  this->solution.norm = initial_solution->norm;
379  } else {
380  this->w.resize(samples.dim());
381  }
382  }
383 
 /**
  * \brief Fixed-margin perceptron training pass: cycles over the data and
  *        updates w/bias/alphas for every point violating the margin, with the
  *        weight update depending on the chosen q-norm.
  * NOTE(review): the function header (doc line 385, presumably
  * `bool IMApFixedMargin<T>::train() {`) is missing from this extraction.
  */
384  template<typename T>
386  int c, e = 1, i, k, s = 0, j;
387  int t, idx, r;
388  size_t size = this->samples->size(), dim = this->samples->dim();
389  double norm = this->solution.norm, bias = this->solution.bias, lambda = 1, y, time =
390  this->max_time + this->start_time;
391  double sumnorm = 0; // accumulated norm for the later computation (no sqrt anymore)
392  double maiorw_temp = 0;
393  int n_temp, sign = 1;
394  bool cond;
395  std::vector<double> func(size, 0.0);
396  std::vector<int> index = this->samples->getIndex();
397  std::vector<T> x;
398  this->timer.reset();
399 
400  if (!this->solution.w.empty())
401  this->w = this->solution.w.X();
402  else this->w.resize(this->samples->dim(), 0.0);
 // Outer loop bounded by the remaining time budget.
403  while (time - this->timer.elapsed() > 0) {
404  for (e = 0, i = 0; i < size; ++i) {
405  //shuffling data r = i + rand()%(size-i); j = index[i]; idx = index[i] = index[r]; index[r] = j;
406  idx = index[i];
407  x = (*this->samples)[idx]->X();
408  y = (*this->samples)[idx]->Y();
409 
410  //calculating function
411  for (func[idx] = bias, j = 0; j < dim; ++j) {
412  func[idx] += this->w[j] * x[j];
413  }
414 
415  //Checking if the point is a mistake
416  if (y * func[idx] <= this->gamma * norm - (*this->samples)[idx]->Alpha() * this->flexible) {
 // Decay all alphas, then update w according to the q-norm in use.
417  lambda = (norm) ? (1 - this->rate * this->gamma / norm) : 1;
418  for (r = 0; r < size; ++r)
419  (*this->samples)[r]->Alpha() *= lambda;
420 
421  if (this->q == 1.0) //Linf
422  {
423  for (sumnorm = 0, j = 0; j < dim; ++j) {
424  sign = 1;
425  if (this->w[j] < 0) sign = -1;
426  lambda = (norm > 0 && this->w[j] != 0) ? this->gamma * sign : 0;
427  this->w[j] += this->rate * (y * x[j] - lambda);
428  sumnorm += fabs(this->w[j]);
429  }
430  norm = sumnorm;
431  } else if (this->q == 2.0) //L2
432  {
433  for (sumnorm = 0, j = 0; j < dim; ++j) {
434  lambda = (norm > 0 && this->w[j] != 0) ? this->w[j] * this->gamma / norm : 0;
435  this->w[j] += this->rate * (y * x[j] - lambda);
436  sumnorm += this->w[j] * this->w[j];
437  }
438  norm = sqrt(sumnorm);
439  } else if (this->q == -1.0) //L1
440  {
 // L1 case tracks the largest |w[j]| (maiorw) and how many components
 // tie with it (n); only near-maximal components receive the margin term.
441  maiorw_temp = fabs(this->w[0]);
442  n_temp = 1;
443  for (j = 0; j < dim; ++j) {
444  if (this->maiorw == 0 ||
445  fabs(this->maiorw - fabs(this->w[j])) / this->maiorw < this->EPS) {
446  sign = 1;
447  if (this->w[j] < 0) sign = -1;
448  lambda = (norm > 0 && this->w[j] != 0) ? this->gamma * sign / this->n : 0;
449  this->w[j] += this->rate * (y * x[j] - lambda);
450  } else
451  this->w[j] += this->rate * (y * x[j]);
452 
453  if (j > 0) {
454  if (fabs(maiorw_temp - fabs(this->w[j])) / maiorw_temp < this->EPS)
455  n_temp++;
456  else if (fabs(this->w[j]) > maiorw_temp) {
457  maiorw_temp = fabs(this->w[j]);
458  n_temp = 1;
459  }
460  }
461  }
462  this->maiorw = maiorw_temp;
463  this->n = n_temp;
464  norm = this->maiorw;
465  if (this->n > this->maiorn) this->maiorn = this->n;
466  } else //other formulations - Lp
467  {
468  for (sumnorm = 0, j = 0; j < dim; ++j) {
469  lambda = (norm > 0 && this->w[j] != 0) ? this->w[j] * this->gamma *
470  std::pow(fabs(this->w[j]), this->q - 2.0) *
471  std::pow(norm, 1.0 - this->q) : 0;
472  this->w[j] += this->rate * (y * x[j] - lambda);
473  sumnorm += std::pow(fabs(this->w[j]), this->q);
474  }
475  norm = std::pow(sumnorm, 1.0 / this->q);
476  }
477  bias += this->rate * y;
478  (*this->samples)[idx]->Alpha() += this->rate;
479 
 // Move updated points towards the front of the index order.
480  k = (i > s) ? s++ : e;
481  j = index[k];
482  index[k] = idx;
483  index[i] = j;
484  this->ctot++;
485  e++;
486  } else if (this->steps > 0 && e > 1 && i > s) break;
487  }
488  //std::cout << mltk::Point<double>(this->w) << std::endl;
489  this->steps++; //Number of iterations update
490  //stop criterion
491  if (e == 0) break;
492  if (this->steps > this->MAX_IT) break;
493  if (this->ctot > this->MAX_UP) break;
494  if (this->flagNao1aDim) if (this->ctot > tMax) break;
495  }
 // Publish the (possibly partial) solution; return 1 only when a full pass
 // produced no updates, i.e. the data is separated at margin gamma.
496  this->samples->setIndex(index);
497  this->solution.norm = norm;
498  this->solution.bias = bias;
499  this->solution.w = this->w;
500  this->solution.func = func;
501  if (e == 0) return 1;
502  else return 0;
503  }
504 
505  template<typename T>
506  double IMApFixedMargin<T>::evaluate(const Point<T> &p, bool raw_value) {
507  double func = 0.0;
508  int i;
509  size_t dim = this->solution.w.size();
510 
511  if (p.X().size() != dim) {
512  std::cerr << "The point must have the same dimension of the feature set!" << std::endl;
513  return 0;
514  }
515 
516  for (func = this->solution.bias, i = 0; i < dim; i++) {
517  func += this->solution.w[i] * p[i];
518  }
519 
520  if (!raw_value) return (func >= this->solution.margin * this->solution.norm) ? 1 : -1;
521  else return func;
522  }
523 
524  template<typename T>
525  IMADual<T>::IMADual(KernelType kernel_type, double kernel_param, double rate, Solution *initial_solution) {
526  this->samples = nullptr;
527  this->kernel = new Kernel<T>(kernel_type, kernel_param);
528  this->rate = rate;
529 
530  if (initial_solution) {
531  this->solution.w = initial_solution->w;
532  this->solution.bias = initial_solution->bias;
533  this->hasInitialSolution = true;
534  }
535  }
536 
537  template<typename T>
538  IMADual<T>::IMADual(const Data<T> &samples, KernelType kernel_type, double kernel_param, double rate,
539  Solution *initial_solution) {
540  this->samples = make_data<T>(samples);
541  this->kernel = new Kernel<T>(kernel_type, kernel_param);
542  this->rate = rate;
543 
544  if (initial_solution) {
545  this->solution.w = initial_solution->w;
546  this->solution.bias = initial_solution->bias;
547  this->hasInitialSolution = true;
548  } else {
549  this->solution.w.resize(this->samples->dim());
550  }
551  }
552 
 /**
  * \brief IMA dual training: repeatedly calls the fixed-margin dual perceptron
  *        with an increasing margin gamma until the margin stops improving,
  *        then reconstructs the primal weights from the alphas.
  * NOTE(review): three lines are missing from this extraction — the function
  * header (doc line 554, presumably `bool IMADual<T>::train() {`), the
  * declaration of `percDual` (doc line 591, presumably a
  * PerceptronFixedMarginDual<T>), and the then-branch body at doc line 672
  * (presumably `w_saved = DualClassifier<T>::getDualWeightProdInt().X();`).
  * Confirm against the repository before editing.
  */
553  template<typename T>
555  double rmargin = 0, old_rmargin = 0,secs;
556  size_t i, j, it;
557  size_t sv = 0, size = this->samples->size(), dim = this->samples->dim();
558  double min, max, norm = 0, stime = 0;
559  dMatrix K;
560  std::vector<int> index = this->samples->getIndex();
561  std::vector<double> w_saved(dim), saved_alphas(size), func(size);
562  std::vector<std::shared_ptr<Point<T> > > points = this->samples->points();
563 
 // NOTE(review): this null check runs after this->samples was already
 // dereferenced above (size()/dim()/getIndex()/points()) — it cannot protect
 // anything; consider hoisting it before the dereferences.
564  if(!this->samples){
565  return false;
566  }
567  this->kernel->compute(this->samples);
568  this->timer.reset();
569 
570  //Allocating space for index
571  if (index.size() == 0) {
572  index.resize(size);
573 
574  //Initializing alpha and bias
575  for (i = 0; i < size; ++i) { index[i] = i; }
576  }
577  this->solution.bias = 0;
578  this->solution.w.resize(this->samples->dim());
579 
580  if (this->verbose) {
581  std::cout << "-------------------------------------------------------------------\n";
582  std::cout << " steps updates margin norm svs secs\n";
583  std::cout << "-------------------------------------------------------------------\n";
584  }
585 
586  it = 0;
587  this->ctot = 0;
588  this->steps = 0;
589  this->gamma = 0;
590 
592  Solution sol, *solr;
593 
 // Configure the fixed-margin dual sub-solver with the shared state.
594  percDual.setSamples(this->samples);
595  percDual.setKernel(this->kernel);
596  percDual.setGamma(this->gamma);
597  percDual.setLearningRate(this->rate);
598  percDual.setMaxTime(this->max_time);
599  percDual.setMaxUpdates(this->MAX_UP);
600  percDual.setMaxIterations(this->MAX_IT);
601 
602  stime = this->timer.elapsed();
603 
 // Main IMA loop: grow gamma after each converged sub-solver run and stop
 // once the real margin no longer improves by at least EPS.
604  while (percDual.train()) {
605  points = percDual.getSamples()->points();
606  stime += percDual.getElapsedTime();
607  this->samples->setIndex(percDual.getSamples()->getIndex());
608  //Finding minimum and maximum functional values
609  this->ctot = percDual.getCtot();
610  this->steps = percDual.getSteps();
611  solr = percDual.getSolutionRef();
612  norm = solr->norm;
613  this->solution.bias = solr->bias;
614 
615  for (sv = 0, min = DBL_MAX, max = -DBL_MAX, i = 0; i < size; ++i) {
616  if (points[i]->Alpha() > this->EPS * this->rate) {
617  sv++;
618  saved_alphas[i] = points[i]->Alpha();
619  }
620  else { saved_alphas[i] = 0.0; }
621  if (solr->func[i] >= 0 && min > solr->func[i] / norm) min = solr->func[i] / norm;
622  else if (solr->func[i] < 0 && max < solr->func[i] / norm) max = solr->func[i] / norm;
623  }
624 
625  //Obtaining real margin
626  rmargin = (fabs(min) > fabs(max)) ? fabs(max) : fabs(min);
627 
628  //Obtaining new gamma_f
629  this->gamma = (min - max) / 2.0;
630  if (this->gamma < this->MIN_INC * rmargin) this->gamma = this->MIN_INC * rmargin;
631 
632  percDual.setGamma(this->gamma);
633  secs = stime / 1000;
634 
635  if (this->verbose)
636  std::cout << " " << this->steps << " " << this->ctot << " " << rmargin << " "
637  << norm << " " << sv << " " << secs << std::endl;
638  ++it; //IMA iteration increment
 // NOTE(review): `it > 0` is always true here (it was just incremented), so
 // the loop stops whenever the margin gain is below EPS — confirm intent.
639  if((it > 0) && (rmargin - old_rmargin) < this->EPS){
640  break;
641  }
642  old_rmargin = rmargin;
643  }
644  this->ctot = percDual.getCtot();
645  this->steps = percDual.getSteps();
646  sol = percDual.getSolution();
647  this->alpha = percDual.getAlphaVector();
648  norm = sol.norm;
649  this->solution.bias = sol.bias;
650  func = sol.func.X();
651 
652  for (i = 0; i < size; ++i) {
653  if (points[i]->Alpha() > this->EPS * this->rate) { this->svs.push_back(i); }
654  }
655 
656  for (i = 0; i < size; ++i) points[i]->Alpha() = saved_alphas[i];
657 
658  this->solution.norm = this->kernel->norm(*this->samples);
659 
660  /* recovering the DJ vector -- the component "weights" */
661  int kernel_type = this->kernel->getType();
662  double kernel_param = this->kernel->getParam();
663 
 // Linear kernel: primal weights are the alpha-weighted sum of the points.
664  if (kernel_type == 0)
665  for (i = 0; i < dim; i++) {
666  for (j = 0; j < size; j++) {
667  w_saved[i] += points[j]->Alpha() * points[j]->Y() * points[j]->X()[i];
668  }
669  }
670  else {
671  if (kernel_type == 1 && kernel_param == 1)
673  else
674  w_saved = DualClassifier<T>::getDualWeight().X();
 // NOTE(review): this normalization of solution.w is overwritten two lines
 // below by `this->solution.w = w_saved;` — it appears to be dead; confirm
 // whether `w_saved` was meant to be normalized instead.
675  if (it) {
676  this->solution.w = mltk::normalize(this->solution.w, 2.0).X();
677  }
678  }
679 
680  this->solution.w = w_saved;
681  this->solution.margin = rmargin;
682  this->solution.alpha.assign(size, 0.0);
683  for(i = 0; i < size; i++){
684  this->solution.alpha[i] = points[i]->Alpha();
685  }
686 
687  if (this->verbose) {
688  std::cout << "-------------------------------------------------------------------\n";
689  std::cout << "Number of times that the Fixed Margin Perceptron was called:: " << it + 1 << std::endl;
690  std::cout << "Number of steps through data: " << this->steps << std::endl;
691  std::cout << "Number of updates: " << this->ctot << std::endl;
692  std::cout << "Number of support vectors: " << sv << std::endl;
693  std::cout << "Margin found: " << rmargin << "\n\n";
694  if (this->verbose > 2) {
695  std::vector<int> fnames = this->samples->getFeaturesNames();
696  for (i = 0; i < dim; i++)
697  std::cout << "W[" << i << "]: " << this->solution.w[i] << std::endl;
698  std::cout << "Bias: " << this->solution.bias << "\n\n";
699  }
700  }
701 
702  if (!it) {
703  if (this->verbose) std::cout << "FMP convergency wasn't reached!\n";
704  return false;
705  }
706  return true;
707  }
708  }
709 }
710 
711 #endif //CLASSIFICATION_ALGORITHMS_SYSTEM_IMA_HPP
size_t size() const
Returns the size of the dataset.
Definition: Data.hpp:208
void resetIndex()
Reset the index vector.
Definition: Data.hpp:1759
void setIndex(std::vector< int > index)
Set the index vector for the data.
Definition: Data.hpp:1754
std::vector< int > getFeaturesNames() const
Returns the features names.
Definition: Data.hpp:1675
std::vector< int > getIndex() const
Returns the vector of indexes.
Definition: Data.hpp:1695
size_t dim() const
Returns the dimension of the dataset.
Definition: Data.hpp:213
std::vector< std::shared_ptr< Point< T > > > points()
Returns a shared pointer to the vector of Points of the sample.
Definition: Data.hpp:1685
void setMaxUpdates(int max_up)
setMaxIterations Set the max number of updates of the Learner.
Definition: Learner.hpp:205
std::shared_ptr< Data< T > > samples
Samples used in the model training.
Definition: Learner.hpp:21
void setLearningRate(double learning_rate)
Set the learning rate of the Learner.
Definition: Learner.hpp:210
void setCtot(int _ctot)
Set the partial number of updates of the Learner.
Definition: Learner.hpp:170
virtual void setSamples(const Data< T > &data)
setSamples Set the samples used by the Learner.
Definition: Learner.hpp:150
auto getSamples()
Get the Data used by the learner.
Definition: Learner.hpp:105
double rate
Learning rate.
Definition: Learner.hpp:23
void setStartTime(double stime)
setStartTime Set the initial time of the Learner.
Definition: Learner.hpp:180
void setMaxIterations(int max_it)
setMaxIterations Set the max number of iterations of the Learner.
Definition: Learner.hpp:195
void setSteps(int _steps)
Set the partial number of steps used in the training phase of the Learner.
Definition: Learner.hpp:165
int getCtot() const
Get the total number of updates of the Learner.
Definition: Learner.hpp:115
void setMaxTime(double maxtime)
Set the max time of execution.
Definition: Learner.hpp:185
int getSteps() const
getSteps Returns the number of steps through the data by the Learner.
Definition: Learner.hpp:120
double getElapsedTime() const
Get the elapsed time in the training phase of the Learner.
Definition: Learner.hpp:110
Rep const & X() const
Returns the attributes representation of the point (std::vector by default).
Definition: Point.hpp:139
std::size_t size() const
Returns the dimension of the point.
Definition: Point.hpp:133
Definition: Solution.hpp:13
double bias
Bias of the solution.
Definition: Solution.hpp:23
double norm
Norm of the solution.
Definition: Solution.hpp:29
mltk::Point< double > w
Weights vector.
Definition: Solution.hpp:17
Solution * getSolutionRef()
getSolution Returns a reference to the solution of the classifier.
Definition: classifier/Classifier.hpp:58
Solution getSolution() const
getSolution Returns the solution of the classifier.
Definition: classifier/Classifier.hpp:52
void setGamma(double gamma)
Set the gamma (margin) of the classifier.
Definition: classifier/Classifier.hpp:67
void setSolution(Solution solution)
setSolution Set a solution for the classifier.
Definition: classifier/Classifier.hpp:79
double gamma
Classifier margin.
Definition: classifier/Classifier.hpp:27
Definition: DualClassifier.hpp:16
std::vector< double > getAlphaVector()
Get the vector of alphas.
Definition: DualClassifier.hpp:100
Point< double > getDualWeightProdInt()
Compute the weights with inner product of the dual classifier.
Definition: DualClassifier.hpp:155
Point< double > getDualWeight()
Compute the weights of the dual classifier (with H matrix).
Definition: DualClassifier.hpp:123
void setKernel(Kernel< T > *K)
setKernel Set the kernel used by the dual classifier.
Definition: DualClassifier.hpp:56
Definition: IMA.hpp:77
bool train() override
Function that execute the training phase of a Learner.
Definition: IMA.hpp:554
std::vector< int > getSupportVectors()
Get the indexes of support vectors.
Definition: IMA.hpp:94
Wrapper for the implementation of the Incremental Margin Algorithm primal with fixed margin.
Definition: IMA.hpp:53
bool train() override
Function that execute the training phase of a Learner.
Definition: IMA.hpp:385
double evaluate(const Point< T > &p, bool raw_value=false) override
Returns the class of a feature point based on the trained Learner.
Definition: IMA.hpp:506
Wrapper for the implementation of the Incremental Margin Algorithm primal.
Definition: IMA.hpp:26
std::vector< int > getSupportVectors()
Get the indexes of the support vectors.
Definition: IMA.hpp:46
double evaluate(const Point< T > &p, bool raw_value=false) override
Returns the class of a feature point based on the trained Learner.
Definition: IMA.hpp:350
bool train() override
Function that execute the training phase of a Learner.
Definition: IMA.hpp:116
Wrapper for the implementation of the Perceptron dual with fixed margin algorithm.
Definition: Perceptron.hpp:69
bool train() override
Function that execute the training phase of a Learner.
Definition: Perceptron.hpp:480
Definition: PrimalClassifier.hpp:14
void setqNorm(double q)
setqNorm Set the q norm used by the classifier. (Euclidean norm is the default)
Definition: PrimalClassifier.hpp:85
double q
Norm used in the classification. (Euclidean Norm is the default)
Definition: PrimalClassifier.hpp:20
void setFlexible(double flexible)
Set flexibity of the classifier.
Definition: PrimalClassifier.hpp:97
double flexible
Flexibility.
Definition: PrimalClassifier.hpp:24
A helper class to measure execution time for benchmarking purposes.
Definition: ThreadPool.hpp:503
double radius(const Data< T > &data, int feat, double q)
Returns radius of the ball that circ. the data.
Definition: Statistics.hpp:186
UFJF-MLTK main namespace for core functionalities.
Definition: classifier/Classifier.hpp:11
T min(const Point< T, R > &p)
Returns the min value of the point.
Definition: Point.hpp:557
T max(const Point< T, R > &p)
Returns the max value of the point.
Definition: Point.hpp:544
Point< T > normalize(Point< T > p, const double q)
normalize Normalize a vector using a Lp-norm.
Definition: Point.hpp:663