G+Smo  25.01.0
Geometry + Simulation Modules
 
Loading...
Searching...
No Matches
gsOptim.h
Go to the documentation of this file.
1
17
18# define Eigen gsEigen
19#define OPTIM_ENABLE_EIGEN_WRAPPERS
20#include <optim/header_only_version/optim.hpp>
21# undef Eigen
22
23using namespace optim;
24
25#pragma once
26
namespace gismo
{

// Forward declarations of the concrete Optim solver bindings defined below,
// needed so that gsOptim can expose them as member typedefs (BFGS, LBFGS, ...).
template <typename T> class gsOptimBFGS;
template <typename T> class gsOptimLBFGS;
template <typename T> class gsOptimCG;
template <typename T> class gsOptimGD;
template <typename T> class gsOptimNewton;
template <typename T> class gsOptimNM;
template <typename T> class gsOptimDE;
template <typename T> class gsOptimDEPRMM;
template <typename T> class gsOptimPSO;
template <typename T> class gsOptimPSODV;
template <typename T> class gsOptimSUMT;

/*
 * TODO:
 * - What to do with root finding
 */
54template <typename T>
56{
58 :
59 m_op(problem)
60 { }
61
62 T operator() (const optim::ColVec_t& vals_in, optim::ColVec_t* grad_out, void* opt_data)
63 {
64 gsAsConstVector<T> in(vals_in.data(),vals_in.rows());
65 if (grad_out)
66 {
67 grad_out->resize(vals_in.size());
68 gsAsVector<T> out(grad_out->data(),grad_out->size());
69 m_op.gradObj_into(in, out);
70 }
71 return m_op.evalObj(in);
72 }
73
74private:
75 gsOptProblem<T> & m_op;
76};
77
83template <typename T>
85{
87 :
88 m_op(problem)
89 { }
90
91 optim::ColVec_t operator() (const optim::ColVec_t& vals_in, optim::Mat_t* jacob_out, void* constr_data)
92 {
93 gsAsConstVector<T> in(vals_in.data(),vals_in.size());
94 if (jacob_out)
95 {
96 std::vector<T> jacTmp(vals_in.size()*m_op.numConstraints());
97 gsAsVector<T> jacOut(jacTmp);
98 m_op.jacobCon_into(in, jacOut);
99
101 // The matrix is organized as follows (see gsOptProblem.h, computeJacStructure):
102 // - row i, constraint i
103 // - col j, design variable j
104 jacob_out->resize(m_op.numConstraints(),vals_in.size());
105 gsAsMatrix<T> out(jacob_out->data(),jacob_out->rows(),jacob_out->cols());
106 out.setZero();
107 std::vector<index_t> conJacRows = m_op.conJacRows();
108 std::vector<index_t> conJacCols = m_op.conJacCols();
109 for (size_t k=0; k!=conJacRows.size(); k++)
110 out(conJacRows[k],conJacCols[k]) = jacTmp[k];
111 }
112 gsVector<T> conTmp(m_op.numConstraints());
113 gsAsVector<T> conOut(conTmp.data(),conTmp.size());
114 m_op.evalCon_into(in,conOut);
115 return conTmp;
116 }
117
118private:
119 gsOptProblem<T> & m_op;
120};
121
122
128template <typename T>
129class gsOptim : public gsOptimizer<T>
130{
131 using Base = gsOptimizer<T>;
132public:
133
134 typedef memory::unique_ptr<gsOptim> uPtr;
135
136 typedef gsOptimBFGS<T> BFGS;
137 typedef gsOptimLBFGS<T> LBFGS;
138 typedef gsOptimCG<T> CG;
139 typedef gsOptimGD<T> GD;
140 // typedef gsOptimNewton<T> Newton;
141 typedef gsOptimNM<T> NM;
142 typedef gsOptimDE<T> DE;
143 typedef gsOptimDEPRMM<T> DEPRMM;
144 typedef gsOptimPSO<T> PSO;
145 typedef gsOptimPSODV<T> PSODV;
146 typedef gsOptimSUMT<T> SUMT;
147
148public:
149
152
153
160 :
161 Base(problem),
162 m_success(false)
163 {
164 this->defaultOptions();
165 }
166
175 static uPtr get(const std::string & slv, gsOptProblem<T> * problem)
176 {
177 if (slv=="BFGS") return uPtr(new BFGS(problem));
178 if (slv=="LBFGS") return uPtr(new LBFGS(problem));
179 if (slv=="CG") return uPtr(new CG(problem));
180 if (slv=="GD") return uPtr(new GD(problem));
181 // if (slv=="Newton") return uPtr(new Newton(problem));
182 if (slv=="NM") return uPtr(new NM(problem));
183 if (slv=="DE") return uPtr(new DE(problem));
184 if (slv=="DEPRMM") return uPtr(new DEPRMM(problem));
185 if (slv=="PSO") return uPtr(new PSO(problem));
186 if (slv=="PSODV") return uPtr(new PSODV(problem));
187 if (slv=="SUMT") return uPtr(new SUMT(problem));
188 GISMO_ERROR("Solver \'"<< slv << "\' not known to G+Smo");
189 }
190
192 virtual void defaultOptions()
193 {
194 Base::defaultOptions();
195
196 m_options.addInt("ConfFailureSwitch","Switch for convergence failure",0);
197 m_options.addReal("GradErrTol","Gradient error tolerance (default: 1e-8)",1e-8);
198 m_options.addReal("RelSolChangeTol","Relative solution change tolerance (default: 1e-14)",1e-14);
199 m_options.addReal("RelObjFnChangeTol","Relative objective function change tolerance (default: 1e-8)",1e-8);
200 }
201
203 virtual void getOptions()
204 {
205 Base::getOptions();
206
207 // See: https://optimlib.readthedocs.io/en/latest/settings.html?highlight=iterations#main-settings
208
209 // RNG seeding
210 // size_t rng_seed_value = std::random_device{}();
211
212 // print and convergence options
213 m_optimSettings.print_level = m_verbose;
214 m_optimSettings.conv_failure_switch = m_options.getInt("ConfFailureSwitch");
215
216 // error tolerance and maxiumum iterations
217 m_optimSettings.iter_max = m_maxIterations;
218
219 m_optimSettings.grad_err_tol = m_options.getReal("GradErrTol");
220 m_optimSettings.rel_sol_change_tol = m_options.getReal("RelSolChangeTol");
221 m_optimSettings.rel_objfn_change_tol = m_options.getReal("RelObjFnChangeTol");
222
223 // bounds
224 // See solve function
225 }
226
228 virtual void solve(const gsMatrix<T> & initialGuess)
229 {
230 // Get the bounds
231 this->getOptions();
232 this->setConstraints();
233 GISMO_ASSERT(initialGuess.cols()==1,"The initial guess should have vector format");
234 gsVector<T> x = initialGuess.col(0);
235 m_success = callOptim(x, *m_op, m_optimSettings);
236 m_curDesign = x;
237 m_numIterations = m_optimSettings.opt_iter;
238 m_finalObjective = m_optimSettings.opt_fn_value;
239 }
240
241
243 bool success() { return m_success; }
244
245protected:
246
248 virtual bool callOptim(gsVector<T> & initialGuess, gsOptProblem<T> & op, optim::algo_settings_t & settings) = 0;
249
252 {
253 if(m_op->desLowerBounds().size()!=0)
254 {
255 m_optimSettings.lower_bounds = m_op->desLowerBounds();
256 m_optimSettings.upper_bounds = m_op->desUpperBounds();
257 m_optimSettings.vals_bound = true;
258 }
259 }
260
261// Members taken from gsOptimizer
262protected:
263 using Base::m_op;
264 using Base::m_numIterations;
265 using Base::m_finalObjective;
266 using Base::m_curDesign;
267 using Base::m_options;
268 using Base::m_verbose;
269 using Base::m_maxIterations;
270
271 using Base::defaultOptions;
272 using Base::getOptions;
273
274protected:
275 // Optim options
276 optim::algo_settings_t m_optimSettings;
277 bool m_success;
278};
279
285template <typename T>
286class gsOptimBFGS : public gsOptim<T>
287{
288public:
289 using Base = gsOptim<T>;
290
291public:
292
294 gsOptimBFGS(gsOptProblem<T> * problem) : Base(problem)
295 {
296 this->defaultOptions();
297 }
298
301 gsOptProblem<T> & op,
302 optim::algo_settings_t & optimSettings) override
303 { return optim::bfgs(x, gsOptimWrapper<T>(op), nullptr,optimSettings); }
304
306 void defaultOptions() override
307 {
309 m_options.addReal("WolfeCons1","Line search tuning parameter that controls the tolerance on the Armijo sufficient decrease condition.",1e-3);
310 m_options.addReal("WolfeCons2","Line search tuning parameter that controls the tolerance on the curvature condition.",0.9);
311 }
312
314 void getOptions() override
315 {
317 m_optimSettings.bfgs_settings.wolfe_cons_1 = m_options.getReal("WolfeCons1");
318 m_optimSettings.bfgs_settings.wolfe_cons_2 = m_options.getReal("WolfeCons2");
319 }
320
321protected:
322
323 using Base::m_options;
324 using Base::m_optimSettings;
325};
326
332template <typename T>
333class gsOptimLBFGS : public gsOptim<T>
334{
335public:
336 using Base = gsOptim<T>;
337
338public:
339
341 gsOptimLBFGS(gsOptProblem<T> * problem) : Base(problem)
342 {
343 this->defaultOptions();
344 }
345
348 gsOptProblem<T> & op,
349 optim::algo_settings_t & optimSettings) override
350 { return optim::lbfgs(x, gsOptimWrapper<T>(op), nullptr,optimSettings); }
351
353 void defaultOptions() override
354 {
356 m_options.addInt("ParM","The number of past gradient vectors to use when forming the approximate Hessian matrix.",10);
357 m_options.addReal("WolfeCons1","Line search tuning parameter that controls the tolerance on the Armijo sufficient decrease condition.",1e-3);
358 m_options.addReal("WolfeCons2","Line search tuning parameter that controls the tolerance on the curvature condition.",0.9);
359 }
360
362 void getOptions() override
363 {
365 m_optimSettings.lbfgs_settings.par_M = m_options.getInt("ParM");
366 m_optimSettings.lbfgs_settings.wolfe_cons_1 = m_options.getReal("WolfeCons1");
367 m_optimSettings.lbfgs_settings.wolfe_cons_2 = m_options.getReal("WolfeCons2");
368 }
369
370protected:
371
372 using Base::m_options;
373 using Base::m_optimSettings;
374};
375
381template <typename T>
382class gsOptimCG : public gsOptim<T>
383{
384public:
385 using Base = gsOptim<T>;
386
387public:
388
390 gsOptimCG(gsOptProblem<T> * problem) : Base(problem)
391 {
392 this->defaultOptions();
393 }
394
397 gsOptProblem<T> & op,
398 optim::algo_settings_t & optimSettings) override
399 { return optim::cg(x, gsOptimWrapper<T>(op), nullptr,optimSettings); }
400
402 void defaultOptions() override
403 {
405 m_options.addInt("UpdateMethod","Update method: 1) Fletcher–Reeves, 2) Polak-Ribiere, 3) FR-PR Hybrid, 4) Hestenes-Stiefel, 5) Dai-Yuan, 6) Hager-Zhang.",2);
406 m_options.addReal("RestartThreshold","parameter ν from step 2 in the algorithm description.",0.1);
407 m_options.addSwitch("UseRelSolChangeCrit","whether to enable the `rel_sol_change_tol` stopping criterion.",false);
408 m_options.addReal("WolfeCons1","Line search tuning parameter that controls the tolerance on the Armijo sufficient decrease condition.",1e-3);
409 m_options.addReal("WolfeCons2","Line search tuning parameter that controls the tolerance on the curvature condition.",0.9);
410 }
411
413 void getOptions() override
414 {
416 m_optimSettings.cg_settings.method = m_options.getInt("UpdateMethod");
417 m_optimSettings.cg_settings.restart_threshold = m_options.getReal("RestartThreshold");
418 m_optimSettings.cg_settings.use_rel_sol_change_crit = m_options.getSwitch("UseRelSolChangeCrit");
419 m_optimSettings.cg_settings.wolfe_cons_1 = m_options.getReal("WolfeCons1");
420 m_optimSettings.cg_settings.wolfe_cons_2 = m_options.getReal("WolfeCons2");
421 }
422
423protected:
424
425 using Base::m_options;
426 using Base::m_optimSettings;
427};
428
434template <typename T>
435class gsOptimGD : public gsOptim<T>
436{
437public:
438 using Base = gsOptim<T>;
439
440public:
441
443 gsOptimGD(gsOptProblem<T> * problem) : Base(problem)
444 {
445 this->defaultOptions();
446 }
447
450 gsOptProblem<T> & op,
451 optim::algo_settings_t & optimSettings) override
452 { return optim::gd(x, gsOptimWrapper<T>(op), nullptr,optimSettings); }
453
455 void defaultOptions() override
456 {
458
459 m_options.addInt("Method","Method to use: 0) Vanilla GD, 1) GD with momentum, 2) Nesterov accelerated gradient descent, 3) AdaGrad. 4) RMSProp, 5) AdaDelta. 6) Adam (adaptive moment estimation) and AdaMax, 7) Nadam (adaptive moment estimation) and NadaMax",0);
460 m_options.addReal("StepSize","The learning rate.",0.1);
461 m_options.addSwitch("StepDecay","Whether to use step decay.",false);
462 m_options.addInt("StepDecayPeriods","Number of periods after which to decay the step size.",10);
463 m_options.addReal("StepDecayValue","The value by which to decay the step size.",0.5);
464 m_options.addReal("Momentum","The momentum parameter.",0.9);
465 m_options.addReal("AdaNormTerm","The normalization term for AdaGrad.",1.0e-08);
466 m_options.addReal("AdaRho","The rho parameter for AdaGrad.",0.9);
467 m_options.addSwitch("AdaMax","Whether to use AdaMax.",false);
468 m_options.addReal("AdamBeta1","The beta1 parameter for Adam.",0.9);
469 m_options.addReal("AdamBeta2","The beta2 parameter for Adam.",0.999);
470 m_options.addSwitch("ClipGrad","Whether to clip the gradient.",false);
471 m_options.addSwitch("ClipMaxNorm","Whether to clip the gradient norm.",false);
472 m_options.addSwitch("ClipMinNorm","Whether to clip the gradient norm.",false);
473 m_options.addInt("ClipNormType","The type of norm to use for clipping.",2);
474 m_options.addReal("ClipNormBound","The bound for the norm used for clipping.",5.0);
475 }
476
478 void getOptions() override
479 {
481
482 m_optimSettings.gd_settings.method = m_options.getInt("Method");
483
484 // step size, or 'the learning rate'
485 m_optimSettings.gd_settings.par_step_size = m_options.getReal("StepSize");
486
487 // decay
488 m_optimSettings.gd_settings.step_decay = m_options.getSwitch("StepDecay");
489 m_optimSettings.gd_settings.step_decay_periods = m_options.getInt("StepDecayPeriods");
490 m_optimSettings.gd_settings.step_decay_val = m_options.getReal("StepDecayValue");
491
492 // momentum parameter
493 m_optimSettings.gd_settings.par_momentum = m_options.getReal("Momentum");
494
495 // Ada parameters
496 m_optimSettings.gd_settings.par_ada_norm_term = m_options.getReal("AdaNormTerm");
497 m_optimSettings.gd_settings.par_ada_rho = m_options.getReal("AdaRho");
498 m_optimSettings.gd_settings.ada_max = m_options.getSwitch("AdaMax");
499
500 // Adam parameters
501 m_optimSettings.gd_settings.par_adam_beta_1 = m_options.getReal("AdamBeta1");
502 m_optimSettings.gd_settings.par_adam_beta_2 = m_options.getReal("AdamBeta2");
503
504 // gradient clipping settings
505 m_optimSettings.gd_settings.clip_grad = m_options.getSwitch("ClipGrad");
506 m_optimSettings.gd_settings.clip_max_norm = m_options.getSwitch("ClipMaxNorm");
507 m_optimSettings.gd_settings.clip_min_norm = m_options.getSwitch("ClipMinNorm");
508 m_optimSettings.gd_settings.clip_norm_type = m_options.getInt("ClipNormType");
509 m_optimSettings.gd_settings.clip_norm_bound = m_options.getReal("ClipNormBound");
510 }
511
512protected:
513
514 using Base::m_options;
515 using Base::m_optimSettings;
516};
517
// NOTE: OMITTED SINCE IT NEEDS A HESSIAN!!
519//
520// template <typename T>
521// class gsOptimNewton : public gsOptim<T>
522// {
523// public:
524// using Base = gsOptim<T>;
525
526// public:
527
528// gsOptimNewton(gsOptProblem<T> * problem) : Base(problem)
529// {
530// this->defaultOptions();
531// }
532
533// bool callOptim( gsVector<T> & x,
534// gsOptProblem<T> & op,
535// optim::algo_settings_t & optimSettings) override
536// { return optim::newton(x, gsOptimWrapper<T>(op), nullptr,optimSettings); }
537
538// protected:
539
540// using Base::m_options;
541// using Base::m_optimSettings;
542// };
543
549template <typename T>
550class gsOptimNM : public gsOptim<T>
551{
552public:
553 using Base = gsOptim<T>;
554
555public:
556
558 gsOptimNM(gsOptProblem<T> * problem) : Base(problem)
559 {
560 this->defaultOptions();
561 }
562
565 gsOptProblem<T> & op,
566 optim::algo_settings_t & optimSettings) override
567 { return optim::nm(x, gsOptimWrapper<T>(op), nullptr,optimSettings); }
568
570 void defaultOptions() override
571 {
573 m_options.addSwitch("AdaptivePars","scale the contraction, expansion, and shrinkage parameters using the dimension of the optimization problem.",true);
574 m_options.addReal("ParAlpha","Reflection parameter.",1.0);
575 m_options.addReal("ParBeta","Contraction parameter.",0.5);
576 m_options.addReal("ParGamma","Expansion parameter.",2.0);
577 m_options.addReal("ParDelta","Shrinkage parameter.",0.5);
578 m_options.addSwitch("CustomInitialSimplex","whether to use user-defined values for the initial simplex matrix.",false);
579 }
580
584 void setSimplexPoints(const gsMatrix<T> & points)
585 {
586 m_optimSettings.nm_settings.initial_simplex_points = points;
587 }
588
590 void getOptions() override
591 {
593 m_optimSettings.nm_settings.adaptive_pars = m_options.getSwitch("AdaptivePars");
594
595 m_optimSettings.nm_settings.par_alpha = m_options.getReal("ParAlpha");
596 m_optimSettings.nm_settings.par_beta = m_options.getReal("ParBeta");
597 m_optimSettings.nm_settings.par_gamma = m_options.getReal("ParGamma");
598 m_optimSettings.nm_settings.par_delta = m_options.getReal("ParDelta");
599
600 m_optimSettings.nm_settings.custom_initial_simplex = m_options.getSwitch("CustomInitialSimplex");
601 }
602
603protected:
604
605 using Base::m_options;
606 using Base::m_optimSettings;
607};
608
614template <typename T>
615class gsOptimDE : public gsOptim<T>
616{
617public:
618 using Base = gsOptim<T>;
619
620public:
621
623 gsOptimDE(gsOptProblem<T> * problem) : Base(problem)
624 {
625 this->defaultOptions();
626 }
627
630 gsOptProblem<T> & op,
631 optim::algo_settings_t & optimSettings) override
632 { return optim::de(x, gsOptimWrapper<T>(op), nullptr,optimSettings); }
633
635 void defaultOptions() override
636 {
638 m_options.addInt("nPop","size of population for each generation.",200);
639 m_options.addInt("nPopBest","number of best individuals to select.",6);
640 m_options.addInt("nGen","number of generations.",1000);
641 m_options.addInt("MutationMethod","the mutation strategy, as described in step one of the algorithm description: 1) rand, 2) best.",1);
642 m_options.addInt("CheckFreq","how many generations to skip when evaluating whether the best candidate value has improved between generations (i.e., to check for potential convergence).",std::numeric_limits<index_t>::max()-1);
643 m_options.addReal("ParF","the mutation parameter.",0.8);
644 m_options.addReal("ParCR","the crossover parameter.",0.9);
645 m_options.addInt("PMax","Maximum number of perturbations.",4);
646 m_options.addInt("MaxFnEval","Maximum number of function evaluations.",100000);
647 m_options.addReal("ParFL","Lower bound for the scaling factor.",0.1);
648 m_options.addReal("ParFU","Upper bound for the scaling factor.",1.0);
649 m_options.addReal("ParTauF","Scaling factor for the scaling factor.",0.1);
650 m_options.addReal("ParTauCR","Scaling factor for the crossover probability.",0.1);
651 m_options.addReal("ParDEps","Small value for numerical stability.",0.5);
652 m_options.addSwitch("ReturnPopulationMat","Whether to return the population matrix.",false);
653 }
654
656 void setBounds(const gsMatrix<T,Dynamic,2> & bounds)
657 {
658 m_optimSettings.de_settings.initial_lb = bounds.col(0);
659 m_optimSettings.de_settings.initial_ub = bounds.col(1);
660 }
661
664 {
665 GISMO_ASSERT(m_optimSettings.de_settings.return_population_mat,"The option ReturnPopulationMat was not set to true.");
666 return m_optimSettings.de_settings.population_mat;
667 }
668
670 void getOptions() override
671 {
673 m_optimSettings.de_settings.n_pop = m_options.getInt("nPop");
674 m_optimSettings.de_settings.n_pop_best = m_options.getInt("nPopBest");
675 m_optimSettings.de_settings.n_gen = m_options.getInt("nGen");
676
677# ifdef _OPENMP
678 m_optimSettings.de_settings.omp_n_threads = omp_get_num_threads(); // numbers of threads to use
679# else
680 m_optimSettings.de_settings.omp_n_threads = -1; // numbers of threads to use
681# endif
682
683 m_optimSettings.de_settings.mutation_method = m_options.getInt("MutationMethod");
684
685 m_optimSettings.de_settings.check_freq = m_options.getInt("CheckFreq");
686
687 m_optimSettings.de_settings.par_F = m_options.getReal("ParF");
688 m_optimSettings.de_settings.par_CR = m_options.getReal("ParCR");
689
690 // DE-PRMM specific
691
692 m_optimSettings.de_settings.pmax = m_options.getInt("PMax");
693 m_optimSettings.de_settings.max_fn_eval = m_options.getInt("MaxFnEval");
694
695 m_optimSettings.de_settings.par_F_l = m_options.getReal("ParFL");
696 m_optimSettings.de_settings.par_F_u = m_options.getReal("ParFU");
697
698 m_optimSettings.de_settings.par_tau_F = m_options.getReal("ParTauF");
699 m_optimSettings.de_settings.par_tau_CR = m_options.getReal("ParTauCR");
700
701 m_optimSettings.de_settings.par_d_eps = m_options.getReal("ParDEps");
702 }
703
704protected:
705
706 using Base::m_options;
707 using Base::m_optimSettings;
708};
709
715template <typename T>
716class gsOptimDEPRMM : public gsOptimDE<T>
717{
718 using Base = gsOptimDE<T>;
719public:
720 gsOptimDEPRMM(gsOptProblem<T> * problem) : Base(problem)
721 {
722 this->defaultOptions();
723 }
724
727 gsOptProblem<T> & op,
728 optim::algo_settings_t & optimSettings) override
729 { return optim::de_prmm(x, gsOptimWrapper<T>(op), nullptr,optimSettings); }
730};
731
737template <typename T>
738class gsOptimPSO : public gsOptim<T>
739{
740public:
741 using Base = gsOptim<T>;
742
743public:
744
746 gsOptimPSO(gsOptProblem<T> * problem) : Base(problem)
747 {
748 this->defaultOptions();
749 }
750
753 gsOptProblem<T> & op,
754 optim::algo_settings_t & optimSettings) override
755 { return optim::pso(x, gsOptimWrapper<T>(op), nullptr,optimSettings); }
756
758 void defaultOptions() override
759 {
761 m_options.addSwitch("CenterParticle","whether to add a particle that averages across the population in each generation.",true);
762 m_options.addInt("nPop","size of population for each generation.",200);
763 m_options.addInt("nGen","number of generations.",1000);
764 m_options.addInt("OMPThreads","numbers of threads to use.",-1);
765 m_options.addInt("InertiaMethod","1 for linear decreasing between w_min and w_max; 2 for dampening.",1);
766 m_options.addInt("CheckFreq","how many generations to skip when evaluating whether the best candidate value has improved between generations (i.e., to check for potential convergence).",std::numeric_limits<index_t>::max()-1);
767 // m_options.addReal("ParInitialW","Initial value for the inertia weight.",1.0);
768 m_options.addReal("ParWDamp","initial value of the weight parameter w.",0.99);
769 m_options.addReal("ParWMin","lower bound on the weight parameter w.",0.10);
770 m_options.addReal("ParWMax","upper bound on the weight parameter w.",0.99);
771 m_options.addInt("VelocityMethod","set velocity method (1 for fixed values or 2 for linear change from initial to final values).",1);
772 m_options.addReal("ParCCog","value for c_c.",2.0);
773 m_options.addReal("ParCSoc","value for c_s.",2.0);
774 // m_options.addReal("ParInitialCCog","initial value for c_c.",2.5);
775 m_options.addReal("ParFinalCCog","final value for c_c.",0.5);
776 // m_options.addReal("ParInitialCSoc","initial value for c_s.",0.5);
777 m_options.addReal("ParFinalCSoc","final value c_s.",2.5);
778 m_options.addSwitch("ReturnPopulationMat","Whether to return the population matrix.",false);
779 }
780
782 void setBounds(const gsMatrix<T,Dynamic,2> & bounds)
783 {
784 m_optimSettings.pso_settings.initial_lb = bounds.col(0);
785 m_optimSettings.pso_settings.initial_ub = bounds.col(1);
786 }
787
790 {
791 GISMO_ASSERT(m_optimSettings.pso_settings.return_position_mat,"The option ReturnPopulationMat was not set to true.");
792 return m_optimSettings.pso_settings.position_mat;
793 }
794
796 void getOptions() override
797 {
799 m_optimSettings.pso_settings.center_particle = m_options.getSwitch("CenterParticle");
800 m_optimSettings.pso_settings.n_pop = m_options.getInt("nPop");
801 m_optimSettings.pso_settings.n_gen = m_options.getInt("nGen");
802
803# ifdef _OPENMP
804 m_optimSettings.pso_settings.omp_n_threads = omp_get_num_threads(); // numbers of threads to use
805# else
806 m_optimSettings.pso_settings.omp_n_threads = -1; // numbers of threads to use
807# endif
808
809 m_optimSettings.pso_settings.inertia_method = m_options.getInt("InertiaMethod");
810 m_optimSettings.pso_settings.check_freq = m_options.getInt("CheckFreq");
811 // m_optimSettings.pso_settings.par_initial_w = m_options.getReal("ParInitialW");
812 m_optimSettings.pso_settings.par_w_damp = m_options.getReal("ParWDamp");
813 m_optimSettings.pso_settings.par_w_min = m_options.getReal("ParWMin");
814 m_optimSettings.pso_settings.par_w_max = m_options.getReal("ParWMax");
815 m_optimSettings.pso_settings.velocity_method = m_options.getInt("VelocityMethod");
816 m_optimSettings.pso_settings.par_c_cog = m_options.getReal("ParCCog");
817 m_optimSettings.pso_settings.par_c_soc = m_options.getReal("ParCSoc");
818 // m_optimSettings.pso_settings.par_initial_c_cog = m_options.getReal("ParInitialCCog");
819 m_optimSettings.pso_settings.par_final_c_cog = m_options.getReal("ParFinalCCog");
820 // m_optimSettings.pso_settings.par_initial_c_soc = m_options.getReal("ParInitialCSoc");
821 m_optimSettings.pso_settings.par_final_c_soc = m_options.getReal("ParFinalCSoc");
822 m_optimSettings.pso_settings.return_position_mat = m_options.getSwitch("ReturnPopulationMat");
823 }
824
825protected:
826
827 using Base::m_options;
828 using Base::m_optimSettings;
829};
830
836template <typename T>
837class gsOptimPSODV : public gsOptimPSO<T>
838{
839 using Base = gsOptimPSO<T>;
840public:
841
843 gsOptimPSODV(gsOptProblem<T> * problem) : Base(problem)
844 {
845 this->defaultOptions();
846 }
847
850 gsOptProblem<T> & op,
851 optim::algo_settings_t & optimSettings) override
852 { return optim::pso_dv(x, gsOptimWrapper<T>(op), nullptr,optimSettings); }
853};
854
860template <typename T>
861class gsOptimSUMT : public gsOptim<T>
862{
863public:
864 using Base = gsOptim<T>;
865
866public:
867
869 gsOptimSUMT(gsOptProblem<T> * problem) : Base(problem)
870 {
871 this->defaultOptions();
872 }
873
876 gsOptProblem<T> & op,
877 optim::algo_settings_t & optimSettings) override
878 { return optim::sumt(x, gsOptimWrapper<T>(op),nullptr,gsOptimWrapperConstraint<T>(op),nullptr,optimSettings); }
879
881 void solve(const gsMatrix<T> & initialGuess) override
882 {
883 // Get the bounds
884 this->getOptions();
885 this->setConstraints();
886 GISMO_ASSERT(initialGuess.cols()==1,"The initial guess should have vector format");
887 gsVector<T> x = initialGuess.col(0);
888 m_success = callOptim(x, *m_op, m_optimSettings);
889 m_curDesign = x;
890 m_numIterations = m_optimSettings.opt_iter;
891 m_finalObjective = m_optimSettings.opt_fn_value;
892 }
893
895 void defaultOptions() override
896 {
898 m_options.addReal("ParEta","eta parameter.",10.0);
899 }
900
902 void getOptions() override
903 {
905 m_optimSettings.sumt_settings.par_eta = m_options.getReal("ParEta");
906 }
907
908protected:
909
910 using Base::m_options;
911 using Base::m_optimSettings;
912
913 using Base::m_op;
914 using Base::m_success;
915 using Base::m_curDesign;
916 using Base::m_numIterations;
917 using Base::m_finalObjective;
918};
919
920} // end namespace gismo
921
922// // note: statically compiled in header-only mode
923// #ifndef GISMO_BUILD_LIB
924// #include GISMO_HPP_HEADER(gsOptim.hpp)
925// #endif
Creates a mapped object or data pointer to a const vector without copying data.
Definition gsAsMatrix.h:285
Creates a mapped object or data pointer to a matrix without copying data.
Definition gsAsMatrix.h:32
Creates a mapped object or data pointer to a vector without copying data.
Definition gsAsMatrix.h:239
A matrix with arbitrary coefficient type and fixed or dynamic size.
Definition gsMatrix.h:41
Class defining an optimization problem.
Definition gsOptProblem.h:25
Binding to Optim's BFGS solver.
Definition gsOptim.h:287
gsOptimBFGS(gsOptProblem< T > *problem)
See gsOptim.
Definition gsOptim.h:294
bool callOptim(gsVector< T > &x, gsOptProblem< T > &op, optim::algo_settings_t &optimSettings) override
See gsOptim.
Definition gsOptim.h:300
void defaultOptions() override
See gsOptim.
Definition gsOptim.h:306
void getOptions() override
See gsOptim.
Definition gsOptim.h:314
gsOptionList m_options
Options.
Definition gsOptimizer.h:108
Binding to Optim's CG solver.
Definition gsOptim.h:383
gsOptimCG(gsOptProblem< T > *problem)
See gsOptim.
Definition gsOptim.h:390
bool callOptim(gsVector< T > &x, gsOptProblem< T > &op, optim::algo_settings_t &optimSettings) override
See gsOptim.
Definition gsOptim.h:396
void defaultOptions() override
See gsOptim.
Definition gsOptim.h:402
void getOptions() override
See gsOptim.
Definition gsOptim.h:413
gsOptionList m_options
Options.
Definition gsOptimizer.h:108
Binding to Optim's DEPRMM solver.
Definition gsOptim.h:717
bool callOptim(gsVector< T > &x, gsOptProblem< T > &op, optim::algo_settings_t &optimSettings) override
See gsOptim.
Definition gsOptim.h:726
Binding to Optim's DE solver.
Definition gsOptim.h:616
gsOptimDE(gsOptProblem< T > *problem)
See gsOptim.
Definition gsOptim.h:623
bool callOptim(gsVector< T > &x, gsOptProblem< T > &op, optim::algo_settings_t &optimSettings) override
See gsOptim.
Definition gsOptim.h:629
gsMatrix< T > getPopulationMatrix()
Gets the population matrix.
Definition gsOptim.h:663
void defaultOptions() override
See gsOptim.
Definition gsOptim.h:635
void getOptions() override
See gsOptim.
Definition gsOptim.h:670
gsOptionList m_options
Options.
Definition gsOptimizer.h:108
void setBounds(const gsMatrix< T, Dynamic, 2 > &bounds)
Set the Upper and lower bounds of the uniform distributions used to generate the initial population.
Definition gsOptim.h:656
Binding to Optim's GD solver.
Definition gsOptim.h:436
gsOptimGD(gsOptProblem< T > *problem)
See gsOptim.
Definition gsOptim.h:443
bool callOptim(gsVector< T > &x, gsOptProblem< T > &op, optim::algo_settings_t &optimSettings) override
See gsOptim.
Definition gsOptim.h:449
void defaultOptions() override
See gsOptim.
Definition gsOptim.h:455
void getOptions() override
See gsOptim.
Definition gsOptim.h:478
gsOptionList m_options
Options.
Definition gsOptimizer.h:108
Binding to Optim's LBFGS solver.
Definition gsOptim.h:334
bool callOptim(gsVector< T > &x, gsOptProblem< T > &op, optim::algo_settings_t &optimSettings) override
See gsOptim.
Definition gsOptim.h:347
gsOptimLBFGS(gsOptProblem< T > *problem)
See gsOptim.
Definition gsOptim.h:341
void defaultOptions() override
See gsOptim.
Definition gsOptim.h:353
void getOptions() override
See gsOptim.
Definition gsOptim.h:362
gsOptionList m_options
Options.
Definition gsOptimizer.h:108
Binding to Optim's NM solver.
Definition gsOptim.h:551
bool callOptim(gsVector< T > &x, gsOptProblem< T > &op, optim::algo_settings_t &optimSettings) override
See gsOptim.
Definition gsOptim.h:564
gsOptimNM(gsOptProblem< T > *problem)
See gsOptim.
Definition gsOptim.h:558
void defaultOptions() override
See gsOptim.
Definition gsOptim.h:570
void getOptions() override
See gsOptim.
Definition gsOptim.h:590
gsOptionList m_options
Options.
Definition gsOptimizer.h:108
void setSimplexPoints(const gsMatrix< T > &points)
Definition gsOptim.h:584
Binding to Optim's PSODV solver.
Definition gsOptim.h:838
bool callOptim(gsVector< T > &x, gsOptProblem< T > &op, optim::algo_settings_t &optimSettings) override
See gsOptim.
Definition gsOptim.h:849
gsOptimPSODV(gsOptProblem< T > *problem)
See gsOptim.
Definition gsOptim.h:843
Binding to Optim's PSO solver.
Definition gsOptim.h:739
gsOptimPSO(gsOptProblem< T > *problem)
See gsOptim.
Definition gsOptim.h:746
bool callOptim(gsVector< T > &x, gsOptProblem< T > &op, optim::algo_settings_t &optimSettings) override
See gsOptim.
Definition gsOptim.h:752
gsMatrix< T > getPopulationMatrix()
Gets the population matrix.
Definition gsOptim.h:789
void defaultOptions() override
See gsOptim.
Definition gsOptim.h:758
void getOptions() override
See gsOptim.
Definition gsOptim.h:796
gsOptionList m_options
Options.
Definition gsOptimizer.h:108
void setBounds(const gsMatrix< T, Dynamic, 2 > &bounds)
Set the Upper and lower bounds of the uniform distributions used to generate the initial population.
Definition gsOptim.h:782
Binding to Optim's SUMT solver.
Definition gsOptim.h:862
bool callOptim(gsVector< T > &x, gsOptProblem< T > &op, optim::algo_settings_t &optimSettings) override
See gsOptim.
Definition gsOptim.h:875
void solve(const gsMatrix< T > &initialGuess) override
See gsOptim.
Definition gsOptim.h:881
void defaultOptions() override
See gsOptim.
Definition gsOptim.h:895
void getOptions() override
See gsOptim.
Definition gsOptim.h:902
gsOptionList m_options
Options.
Definition gsOptimizer.h:108
gsMatrix< T > m_curDesign
Current design variables (and starting point )
Definition gsOptimizer.h:105
gsOptimSUMT(gsOptProblem< T > *problem)
See gsOptim.
Definition gsOptim.h:869
Base class for the Optim wrapper.
Definition gsOptim.h:130
static uPtr get(const std::string &slv, gsOptProblem< T > *problem)
Getter for a specific solver.
Definition gsOptim.h:175
virtual bool callOptim(gsVector< T > &initialGuess, gsOptProblem< T > &op, optim::algo_settings_t &settings)=0
Misc function to call optim.
bool success()
Function returning true when optimization was successful.
Definition gsOptim.h:243
virtual void getOptions()
Options getter.
Definition gsOptim.h:203
virtual void solve(const gsMatrix< T > &initialGuess)
Solve, see gsOptimizer.
Definition gsOptim.h:228
gsOptionList m_options
Options.
Definition gsOptimizer.h:108
gsOptim(gsOptProblem< T > *problem)
Constructor with a gsOptProblem.
Definition gsOptim.h:159
void setConstraints()
Sets the box constraints.
Definition gsOptim.h:251
gsMatrix< T > m_curDesign
Current design variables (and starting point )
Definition gsOptimizer.h:105
gsOptim()
Empty constructor.
Definition gsOptim.h:151
virtual void defaultOptions()
Default options.
Definition gsOptim.h:192
Class defining an optimizer.
Definition gsOptimizer.h:28
gsOptionList m_options
Options.
Definition gsOptimizer.h:108
gsMatrix< T > m_curDesign
Current design variables (and starting point )
Definition gsOptimizer.h:105
void addInt(const std::string &label, const std::string &desc, const index_t &value)
Adds a option named label, with description desc and value value.
Definition gsOptionList.cpp:201
bool getSwitch(const std::string &label) const
Reads value for option label from options.
Definition gsOptionList.cpp:51
const index_t & getInt(const std::string &label) const
Reads value for option label from options.
Definition gsOptionList.cpp:37
Real getReal(const std::string &label) const
Reads value for option label from options.
Definition gsOptionList.cpp:44
void addReal(const std::string &label, const std::string &desc, const Real &value)
Adds a option named label, with description desc and value value.
Definition gsOptionList.cpp:211
void addSwitch(const std::string &label, const std::string &desc, const bool &value)
Adds a option named label, with description desc and value value.
Definition gsOptionList.cpp:235
A vector with arbitrary coefficient type and fixed or dynamic size.
Definition gsVector.h:37
#define GISMO_ERROR(message)
Definition gsDebug.h:118
#define GISMO_ASSERT(cond, message)
Definition gsDebug.h:89
This is the main header file that collects wrappers of Eigen for linear algebra.
Provides declaration of an optimization problem.
This file is part of the G+Smo library.
The G+Smo namespace, containing all definitions for the library.
Wraps the constraint of an optimization problem to a function accepted by Optim.
Definition gsOptim.h:85
optim::ColVec_t operator()(const optim::ColVec_t &vals_in, optim::Mat_t *jacob_out, void *constr_data)
Definition gsOptim.h:91
Wraps the objective of an optimization problem to a function accepted by Optim.
Definition gsOptim.h:56