G+Smo  24.08.0
Geometry + Simulation Modules
gsCurvatureSmoothing.hpp
#pragma once

namespace gismo
{

template<class T>
void gsCurvatureSmoothing<T>::smoothTotalVariation(const T omega1, const T omega2, const T lamda, const T tau, const unsigned iter)
{
    gsKnotVector<T> m_knots=m_curve_smooth->knots(); // take the knots of the current smooth curve
    index_t m_degree=m_curve_smooth->degree(); // degree of the smooth curve

    gsMatrix<T> current_coefs=m_curve_smooth->coefs(); // the coefficients of the current smooth curve

    gsMatrix<T> different_coefs; // needed later for the current selection of the coefficients for the decreasing lamdas

    index_t num_rows=current_coefs.rows()-m_degree; // number of rows of the coefficients
    index_t num_cols=current_coefs.cols(); // number of columns of the coefficients

    gsMatrix<T> deriv_coefs1; // needed later for numerical differentiation to construct the gradient (upper value)
    gsMatrix<T> deriv_coefs2; // needed later for numerical differentiation to construct the gradient (lower value)

    gsMatrix<T> m_gradient(num_rows,num_cols); // the gradient in each step; note that the curve is closed, i.e. the first coefficients are equal to the last ones


    gsMatrix<T> deriv_coefs3; // needed later for numerical differentiation to construct the gradient (upper value) in the lamda*gradient direction (for the backtracking line search method)
    gsMatrix<T> deriv_coefs4; // needed later for numerical differentiation to construct the gradient (lower value) in the lamda*gradient direction (for the backtracking line search method)

    gsMatrix<T> m_gradient2(num_rows,num_cols); // the gradient in the lamda*gradient direction (for the line search method)

    T delta=0.0000001; // for numerical differentiation

    // the basis needed for constructing the derivatives
    gsBSplineBasis<T> * basis = new gsBSplineBasis<T>(m_knots);

    // needed for computing the objective value and the derivatives
    T m_value0=0;
    T m_value1=0;
    T m_value2=0;

    // for the iteration steps with the different lamdas later
    T lamda_value;
    T m_lamda=lamda;

    // the constants of the Armijo-Goldstein (Wolfe) conditions
    T c1=0.0001;
    T c2=0.9;
    // the different sides of the Armijo-Goldstein (Wolfe) conditions
    T cond11,cond12,cond21,cond22,helpcond;

    // the objective function for the current coefficients
    // computed here for the first iteration step -- for the next steps it is computed at the end of the loop
    compute_ObjectiveFunction(basis,&current_coefs,omega1,omega2,m_value0);


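    // Note on the gradient computation below: each entry of the gradient is obtained by a
    // central (two-point) finite difference of the objective function,
    //     dF/dc_(j,k) approx. ( F(c + delta*e_(j,k)) - F(c - delta*e_(j,k)) ) / (2*delta).
    // Because the curve is closed, the first m_degree control points are duplicated at the
    // end of the coefficient matrix, so both copies are perturbed together.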
    for(unsigned i=0;i<iter;i++){
        // gradient descent method
        // computes the gradient with the help of numerical differentiation (two-point formula)
        for(index_t j=0;j<num_rows;j++){
            for(index_t k=0;k<num_cols;k++){
                deriv_coefs1 = current_coefs;
                deriv_coefs1(j,k)+=delta;
                deriv_coefs2 = current_coefs;
                deriv_coefs2(j,k)-=delta;

                // because of the closed curve -- the first and last coefficients coincide
                if(j<m_degree){
                    deriv_coefs1(j+num_rows,k)+=delta;
                    deriv_coefs2(j+num_rows,k)-=delta;
                }

                // compute the objective function for numerical differentiation by means of the two-point formula
                compute_ObjectiveFunction(basis,&deriv_coefs1,omega1,omega2,m_value1);
                compute_ObjectiveFunction(basis,&deriv_coefs2,omega1,omega2,m_value2);

                // entry of the gradient
                m_gradient(j,k)=(m_value1-m_value2)/((T)(2)*delta);
            }
        }


        // we have to find a suitable lamda --> backtracking line search
        m_lamda=lamda;
        cond11=1;
        cond12=0;
        cond21=1;
        cond22=0;
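        // Backtracking line search: starting from the initial lamda, the step size is
        // multiplied by tau until both tests below hold --
        //   sufficient decrease: F(c - lamda*g) <= F(c) + c1*lamda*||g||^2   (cond11 <= cond12)
        //   curvature condition: |<g, g_new>| <= c2*||g||^2                  (cond21 <= cond22)
        // where g_new is the gradient evaluated at c - lamda*g, and the duplicated first
        // m_degree rows of the closed curve are counted twice in the inner products.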
        while( (cond11 > cond12) || (cond21 > cond22) ) {
            /* first step: compute the objective value in the lamda*gradient direction */
            different_coefs=current_coefs;
            different_coefs.conservativeResize(num_rows,num_cols);
            different_coefs=different_coefs-m_lamda*m_gradient;
            different_coefs.conservativeResize(num_rows+m_degree,num_cols);
            for(index_t k=0;k<m_degree;k++){
                different_coefs.row(num_rows+k)=different_coefs.row(k);
            }
            // compute the objective value for the lamda*gradient direction
            compute_ObjectiveFunction(basis,&different_coefs,omega1,omega2,lamda_value);

            /* second step: compute the gradient in the lamda*gradient direction */
            // computes the gradient in the lamda*gradient direction with the help of numerical differentiation (two-point formula)
            for(index_t j=0;j<num_rows;j++){
                for(index_t k=0;k<num_cols;k++){
                    deriv_coefs3 = different_coefs;
                    deriv_coefs3(j,k)+=delta;
                    deriv_coefs4 = different_coefs;
                    deriv_coefs4(j,k)-=delta;

                    // because of the closed curve -- the first and last coefficients coincide
                    if(j<m_degree){
                        deriv_coefs3(j+num_rows,k)+=delta;
                        deriv_coefs4(j+num_rows,k)-=delta;
                    }

                    // compute the objective function for numerical differentiation by means of the two-point formula
                    compute_ObjectiveFunction(basis,&deriv_coefs3,omega1,omega2,m_value1);
                    compute_ObjectiveFunction(basis,&deriv_coefs4,omega1,omega2,m_value2);

                    // entry of the gradient
                    m_gradient2(j,k)=(m_value1-m_value2)/((T)(2)*delta);
                }
            }

            /* third step: initialise the different sides of the Armijo-Goldstein (Wolfe) conditions */
            cond11=lamda_value;


            helpcond=0;
            for(index_t j=0;j<m_gradient.rows();j++){
                for(index_t k=0;k<m_gradient.cols();k++){
                    if(j<m_degree){ // do not forget that we have multiple coefficients
                        helpcond+=(T)(2)*m_gradient(j,k)*m_gradient(j,k);
                    }
                    else{
                        helpcond+=m_gradient(j,k)*m_gradient(j,k);
                    }
                }
            }
            cond12=m_lamda*c1*helpcond+m_value0;


            cond21=0;
            for(index_t j=0;j<m_gradient.rows();j++){
                for(index_t k=0;k<m_gradient.cols();k++){
                    if(j<m_degree){ // do not forget that we have multiple coefficients
                        cond21+=(T)(2)*m_gradient(j,k)*m_gradient2(j,k);
                    }
                    else{
                        cond21+=m_gradient(j,k)*m_gradient2(j,k);
                    }
                }
            }
            cond21=math::abs(cond21);

            cond22=math::abs(c2*helpcond);

            // shrink the step size before testing the conditions again
            m_lamda=m_lamda*tau;
        }

        m_lamda=m_lamda/tau; // to have the right m_lamda in the output

        // compute the coefficients now that step size and descent direction are known
        current_coefs.conservativeResize(num_rows,num_cols);
        current_coefs=current_coefs-m_lamda*m_gradient;
        current_coefs.conservativeResize(num_rows+m_degree,num_cols);
        for(index_t k=0;k<m_degree;k++){
            current_coefs.row(num_rows+k)=current_coefs.row(k);
        }

        // the objective function for the current coefficients -- for the next iteration step and for the error output
        compute_ObjectiveFunction(basis,&current_coefs,omega1,omega2,m_value0);

        gsDebug << "Step: " << i+1 << " lamda: " << m_lamda << " objective value: " << m_value0 << "\n";
    }
    // construct the new smoother B-spline curve
    delete basis;
    reset( new gsBSpline<T>(m_knots, give(current_coefs)) );
}

template<class T>
void gsCurvatureSmoothing<T>::smoothTotalVariationSelectLamda(const T omega1, const T omega2, const gsMatrix<T> listlamdas, const unsigned iter)
{
    gsKnotVector<T> m_knots=m_curve_smooth->knots(); // take the knots of the current smooth curve
    index_t m_degree=m_curve_smooth->degree(); // degree of the smooth curve

    gsMatrix<T> current_coefs=m_curve_smooth->coefs(); // the coefficients of the current smooth curve

    gsMatrix<T> different_coefs;

    index_t num_rows=current_coefs.rows()-m_degree; // number of rows of the coefficients
    index_t num_cols=current_coefs.cols(); // number of columns of the coefficients

    gsMatrix<T> deriv_coefs1; // needed later for numerical differentiation to construct the gradient (upper value)
    gsMatrix<T> deriv_coefs2; // needed later for numerical differentiation to construct the gradient (lower value)

    gsMatrix<T> m_gradient(num_rows,num_cols); // the gradient in each step; note that the curve is closed, i.e. the first coefficients are equal to the last ones

    T delta=0.0000001; // for numerical differentiation

    // the basis needed for constructing the derivatives
    gsBSplineBasis<T> * basis = new gsBSplineBasis<T>(m_knots);


    T m_value0=0;
    T m_value1=0;
    T m_value2=0;

    // for the iteration steps with the different lamdas later
    T lamda_value;
    T max_value=1000000;
    T m_lamda=1;

    // the objective function for the current coefficients
    // computed here for the first iteration step -- for the next steps it is computed at the end of the loop
    compute_ObjectiveFunction(basis,&current_coefs,omega1,omega2,m_value0);

    for(unsigned i=0;i<iter;i++){
        // gradient descent method
        // computes the gradient with the help of numerical differentiation (two-point formula)
        for(index_t j=0;j<num_rows;j++){
            for(index_t k=0;k<num_cols;k++){
                deriv_coefs1 = current_coefs;
                deriv_coefs1(j,k)+=delta;
                deriv_coefs2 = current_coefs;
                deriv_coefs2(j,k)-=delta;

                // because of the closed curve -- the first and last coefficients coincide
                if(j<m_degree){
                    deriv_coefs1(j+num_rows,k)+=delta;
                    deriv_coefs2(j+num_rows,k)-=delta;
                }

                // compute the objective function for numerical differentiation by means of the two-point formula
                compute_ObjectiveFunction(basis,&deriv_coefs1,omega1,omega2,m_value1);
                compute_ObjectiveFunction(basis,&deriv_coefs2,omega1,omega2,m_value2);

                // entry of the gradient
                m_gradient(j,k)=(m_value1-m_value2)/((T)(2)*delta);
            }
        }

        // iteration steps for the different lamdas
        // we first have to strip the multiple coefficients, then perform the iteration step, and then generate the multiple coefficients again

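        // Candidate search: evaluate the objective for every step size in listlamdas and keep
        // the one that gives the smallest objective value; max_value holds the best
        // (i.e. smallest) value found so far.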
        max_value=1000000;

        // for the different lamdas
        for(index_t j=0;j<listlamdas.cols();j++){

            different_coefs=current_coefs;

            different_coefs.conservativeResize(num_rows,num_cols);
            different_coefs=different_coefs-listlamdas(0,j)*m_gradient;
            different_coefs.conservativeResize(num_rows+m_degree,num_cols);
            for(index_t k=0;k<m_degree;k++){
                different_coefs.row(num_rows+k)=different_coefs.row(k);
            }

            // compute the objective values for the different lamdas
            compute_ObjectiveFunction(basis,&different_coefs,omega1,omega2,lamda_value);

            // if the objective function value is smaller, keep this lamda
            if(lamda_value<max_value){
                max_value=lamda_value;
                m_lamda=listlamdas(0,j);
            }
        }


        current_coefs.conservativeResize(num_rows,num_cols);
        current_coefs=current_coefs-m_lamda*m_gradient;
        current_coefs.conservativeResize(num_rows+m_degree,num_cols);
        for(index_t k=0;k<m_degree;k++){
            current_coefs.row(num_rows+k)=current_coefs.row(k);
        }

        // the objective function for the current coefficients -- for the next iteration step and for the error output
        compute_ObjectiveFunction(basis,&current_coefs,omega1,omega2,m_value0);

        gsDebug << "Step: " << i+1 << " lamda: " << m_lamda << " objective value: " << m_value0 << "\n";

    }
    delete basis;

    // construct the new smoother B-spline curve
    reset( new gsBSpline<T>(m_knots, give(current_coefs)) );
}


template<class T>
void gsCurvatureSmoothing<T>::smoothTotalVariationSelectLamda(const T omega1, const T omega2, const T lamda, const unsigned iter)
{
    gsKnotVector<T> m_knots=m_curve_smooth->knots(); // take the knots of the current smooth curve
    index_t m_degree=m_curve_smooth->degree(); // degree of the smooth curve

    gsMatrix<T> current_coefs=m_curve_smooth->coefs(); // the coefficients of the current smooth curve

    index_t num_rows=current_coefs.rows()-m_degree; // number of rows of the coefficients
    index_t num_cols=current_coefs.cols(); // number of columns of the coefficients

    gsMatrix<T> deriv_coefs1; // needed later for numerical differentiation to construct the gradient (upper value)
    gsMatrix<T> deriv_coefs2; // needed later for numerical differentiation to construct the gradient (lower value)

    gsMatrix<T> m_gradient(num_rows,num_cols); // the gradient in each step; note that the curve is closed, i.e. the first coefficients are equal to the last ones

    T delta=0.0000001; // for numerical differentiation

    // the basis needed for constructing the derivatives
    gsBSplineBasis<T> * basis = new gsBSplineBasis<T>(m_knots);


    T m_value0=0;
    T m_value1=0;
    T m_value2=0;

    // the objective function for the current coefficients
    // computed here for the first iteration step -- for the next steps it is computed at the end of the loop
    compute_ObjectiveFunction(basis,&current_coefs,omega1,omega2,m_value0);

    for(unsigned i=0;i<iter;i++){
        // gradient descent method
        // computes the gradient with the help of numerical differentiation (two-point formula)
        for(index_t j=0;j<num_rows;j++){
            for(index_t k=0;k<num_cols;k++){
                deriv_coefs1 = current_coefs;
                deriv_coefs1(j,k)+=delta;
                deriv_coefs2 = current_coefs;
                deriv_coefs2(j,k)-=delta;

                // because of the closed curve -- the first and last coefficients coincide
                if(j<m_degree){
                    deriv_coefs1(j+num_rows,k)+=delta;
                    deriv_coefs2(j+num_rows,k)-=delta;
                }

                // compute the objective function for numerical differentiation by means of the two-point formula
                compute_ObjectiveFunction(basis,&deriv_coefs1,omega1,omega2,m_value1);
                compute_ObjectiveFunction(basis,&deriv_coefs2,omega1,omega2,m_value2);

                // entry of the gradient
                m_gradient(j,k)=(m_value1-m_value2)/((T)(2)*delta);
            }
        }
        // iteration step for the given lamda
        // we first have to strip the multiple coefficients, then perform the iteration step, and then generate the multiple coefficients again
        current_coefs.conservativeResize(num_rows,num_cols);
        current_coefs=current_coefs-lamda*m_gradient;
        current_coefs.conservativeResize(num_rows+m_degree,num_cols);
        for(index_t k=0;k<m_degree;k++){
            current_coefs.row(num_rows+k)=current_coefs.row(k);
        }

        // the objective function for the current coefficients -- for the next iteration step and for the error output
        compute_ObjectiveFunction(basis,&current_coefs,omega1,omega2,m_value0);

        gsDebug << "Step: " << i+1 << " lamda: " << lamda << " objective value: " << m_value0 << "\n";
    }
    delete basis;

    // construct the new smoother B-spline curve
    reset( new gsBSpline<T>(m_knots, give(current_coefs)) );
}


template<class T>
void gsCurvatureSmoothing<T>::smoothHadenfeld(const unsigned smooth_degree, const T delta, const index_t iter_step, const index_t iter_total, gsVector<index_t> &iterated, const bool original)
{
    using std::min;

    index_t m_degree=m_curve_smooth->degree(); // degree of the curve
    gsMatrix<T> m_coefs=m_curve_smooth->coefs(); // get the coefficients -- they will be changed
    index_t num_rows=m_coefs.rows()-m_degree; // the last m_degree coefficients coincide with the first ones
    m_coefs.conservativeResize(num_rows,2); // the coefficients without the repeated end points

    // iter_total could be chosen too large compared with iter_step -- at most iter_step*num_rows
    // iterations are allowed (iter_total <= iter_step*num_rows), otherwise there could be unwanted effects on the resulting curve
    index_t m_iter_total= min(iter_total,iter_step*num_rows);

    // the reference coefficients which will not be changed -- taken either from the original curve or from the already pre-smoothed one
    gsMatrix<T> m_coefs_original;
    if(original==true){
        m_coefs_original=m_curve_original->coefs(); // get the original coefficients which will not be changed
        m_coefs_original.conservativeResize(num_rows,2);
    }

    else{
        m_coefs_original=m_coefs; // these coefficients will not be changed => take them from the already pre-smoothed curve
    }

    // counts how often each coefficient has already been changed
    gsVector<index_t> m_iterated(num_rows);
    m_iterated.setZero();

    T max_value,coef0,coef1,current0,current1,dist_value,m_smooth1,m_smooth2,m_smooth3,m_smooth4;
    index_t index=0;

    // the smoothing degree determines the smoother (i.e. the mask for smoothing)
    if(smooth_degree==2){
        m_smooth1=(43.0/95.0);
        m_smooth2=(16.0/95.0);
        m_smooth3=-(11.0/95.0);
        m_smooth4=-(1.0/190.0);
    }
    else if (smooth_degree==4){
        m_smooth1=(4.0/5.0);
        m_smooth2=-(2.0/5.0);
        m_smooth3=(4.0/35.0);
        m_smooth4=-(1.0/70.0);
    }
    else { // smooth degree 3 -- the default smoothing degree
        m_smooth1=(17.0/25.0);
        m_smooth2=-(4.0/25.0);
        m_smooth3=-(1.0/25.0);
        m_smooth4=(1.0/50.0);
    }
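
    // The four weights form a symmetric smoothing mask: the proposed new position of control
    // point i is
    //     c_i_new = m_smooth1*(c_(i-1)+c_(i+1)) + m_smooth2*(c_(i-2)+c_(i+2))
    //             + m_smooth3*(c_(i-3)+c_(i+3)) + m_smooth4*(c_(i-4)+c_(i+4)),
    // with all indices taken modulo num_rows because the curve is closed.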


    // Hadenfeld's algorithm (for more details see his PhD thesis)
    for(index_t j=0;j<m_iter_total;j++){
        max_value=-100;
        coef0=0;
        coef1=0;
        index=0;

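        // Greedy selection: among all control points that have not yet reached iter_step
        // updates, find the one whose smoothed candidate position deviates most from its
        // current position; only this single coefficient is updated in this iteration.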
        for(index_t i=0;i<num_rows;i++){
            if(m_iterated(i)<iter_step){ // check whether iter_step has already been reached for this coefficient
                // computation of the new coefficients
                current0=m_smooth1*m_coefs((num_rows+i-1)%num_rows,0)+m_smooth1*m_coefs((i+1)%num_rows,0)+m_smooth2*m_coefs((num_rows+i-2)%num_rows,0)
                        +m_smooth2*m_coefs((i+2)%num_rows,0)+m_smooth3*m_coefs((num_rows+i-3)%num_rows,0)+m_smooth3*m_coefs((i+3)%num_rows,0)
                        +m_smooth4*m_coefs((num_rows+i-4)%num_rows,0)+m_smooth4*m_coefs((i+4)%num_rows,0);
                current1=m_smooth1*m_coefs((num_rows+i-1)%num_rows,1)+m_smooth1*m_coefs((i+1)%num_rows,1)+m_smooth2*m_coefs((num_rows+i-2)%num_rows,1)
                        +m_smooth2*m_coefs((i+2)%num_rows,1)+m_smooth3*m_coefs((num_rows+i-3)%num_rows,1)+m_smooth3*m_coefs((i+3)%num_rows,1)
                        +m_smooth4*m_coefs((num_rows+i-4)%num_rows,1)+m_smooth4*m_coefs((i+4)%num_rows,1);

                // the distance of the new coefficient compared to the old one from the last step
                dist_value=sqrt((current0-m_coefs(i,0))*(current0-m_coefs(i,0))+(current1-m_coefs(i,1))*(current1-m_coefs(i,1)));
                // if this distance is the largest so far => keep these values
                if(dist_value>max_value){
                    coef0=current0;
                    coef1=current1;
                    index=i;
                    max_value=dist_value;
                }

            }
        }
        // for the selected coefficient the iteration counter is increased by one
        m_iterated(index)++;
        // compute the new coefficient depending on how large the distance is -- check that the new values are not too far away from the original curve
        max_value=sqrt((coef0-m_coefs_original(index,0))*(coef0-m_coefs_original(index,0))+(coef1-m_coefs_original(index,1))*(coef1-m_coefs_original(index,1)));
        if(max_value>delta){
            m_coefs(index,0)= m_coefs_original(index,0)+(delta/max_value)*(coef0-m_coefs_original(index,0));
            m_coefs(index,1)= m_coefs_original(index,1)+(delta/max_value)*(coef1-m_coefs_original(index,1));

        }
        else{
            m_coefs(index,0)=coef0;
            m_coefs(index,1)=coef1;
        }
    }

    // repeat the coefficients for the closed curve again
    m_coefs.conservativeResize(num_rows+m_degree,2);
    for(index_t k=0;k<m_degree;k++){
        m_coefs.row(num_rows+k)=m_coefs.row(k);
    }

    // the knot vector of the curve
    gsKnotVector<T> m_knots;
    m_knots=m_curve_smooth->knots();

    // construct the new smoother B-spline curve
    reset( new gsBSpline<T>(m_knots, give(m_coefs)) );
    iterated=m_iterated;

}

template<class T>
void gsCurvatureSmoothing<T>::smoothAllHadenfeld(const unsigned smooth_degree, const unsigned iter)
{
    index_t m_degree=m_curve_smooth->degree(); // degree of the curve
    gsMatrix<T> m_coefs=m_curve_smooth->coefs(); // get the coefficients
    index_t num_rows=m_coefs.rows()-m_degree; // the last m_degree coefficients coincide with the first ones
    m_coefs.conservativeResize(num_rows,2); // the coefficients without the repeated end points
    gsMatrix<T> m_A(num_rows,num_rows);
    m_A.setZero(); // ensure that all entries are zero in the beginning


    T m_smooth1, m_smooth2, m_smooth3, m_smooth4;
    // the smoothing degree determines the smoother (i.e. the matrix for smoothing)
    if(smooth_degree==2){
        m_smooth1=(43.0/95.0);
        m_smooth2=(16.0/95.0);
        m_smooth3=-(11.0/95.0);
        m_smooth4=-(1.0/190.0);
    }
    else if(smooth_degree==4){
        m_smooth1=(4.0/5.0);
        m_smooth2=-(2.0/5.0);
        m_smooth3=(4.0/35.0);
        m_smooth4=-(1.0/70.0);
    }
    else { // smooth degree 3 -- also the default smoothing degree
        m_smooth1=(17.0/25.0);
        m_smooth2=-(4.0/25.0);
        m_smooth3=-(1.0/25.0);
        m_smooth4=(1.0/50.0);
    }

    // set the smoothing matrix
    for (index_t i=0;i<num_rows;i++){
        m_A(i,(num_rows+i-1)%num_rows)=m_smooth1;
        m_A(i,(i+1)%num_rows)=m_smooth1;
        m_A(i,(num_rows+i-2)%num_rows)=m_smooth2;
        m_A(i,(i+2)%num_rows)=m_smooth2;
        m_A(i,(num_rows+i-3)%num_rows)=m_smooth3;
        m_A(i,(i+3)%num_rows)=m_smooth3;
        m_A(i,(num_rows+i-4)%num_rows)=m_smooth4;
        m_A(i,(i+4)%num_rows)=m_smooth4;
    }
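
    // m_A is a circulant matrix carrying the Hadenfeld mask in every row, so one smoothing
    // sweep over all control points amounts to the matrix product m_coefs = m_A * m_coefs;
    // iter such sweeps are applied below.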

    // smoothing
    for(unsigned k=0;k<iter;k++){
        m_coefs=m_A*m_coefs;
    }


    // repeat the coefficients for the closed curve again
    m_coefs.conservativeResize(num_rows+m_degree,2);
    for(index_t k=0;k<m_degree;k++){
        m_coefs.row(num_rows+k)=m_coefs.row(k);
    }

    // the knot vector of the curve
    gsKnotVector<T> m_knots;
    m_knots=m_curve_smooth->knots();

    // construct the new smoother B-spline curve
    reset( new gsBSpline<T>(m_knots, give(m_coefs)) );

}

template<class T>
void gsCurvatureSmoothing<T>::write(std::ostream &os)
{
    gsMatrix<T> m_coefs=m_curve_smooth->coefs(); // get the coefficients
    index_t num_rows=m_coefs.rows();
    os << "{";
    for(index_t k=0;k<num_rows-1;k++){
        os << "{" << m_coefs(k,0) << "," << m_coefs(k,1) << "},";
    }
    os << "{" << m_coefs(num_rows-1,0) << "," << m_coefs(num_rows-1,1) << "}}\n";

}

template<class T>
void gsCurvatureSmoothing<T>::computeApproxError(T & error)
{
    gsMatrix<T> results;
    m_curve_smooth->eval_into(m_param_values.transpose(),results); // the points of the curve for the corresponding parameter values
    results.transposeInPlace();
    error=0;
    // computing the approximation error = sum_i ||x(u_i)-p_i||^2
    for(index_t k=0;k<m_points.rows();k++){
        error+=math::pow(m_points(k,0)-results(k,0),2)+math::pow(m_points(k,1)-results(k,1),2);
    }
}

template<class T>
void gsCurvatureSmoothing<T>::computeApproxErrorL2(T & error)
{
    error=0;
    // uses the approximation error; since the parameter domain of the function is [0,1], the L^2 error = (approx. error / #points)^{1/2}
    computeApproxError(error);
    error= math::sqrt( error / static_cast<T>(m_points.rows()) );
}


template<class T>
void gsCurvatureSmoothing<T>::computeApproxErrorLMax(T & error)
{
    gsMatrix<T> results;
    m_curve_smooth->eval_into(m_param_values.transpose(),results); // the points of the curve for the corresponding parameter values
    results.transposeInPlace();
    error = 0.0;
    // computing the L^max approximation error
    for(index_t k=0;k<m_points.rows();k++)
    {
        error= math::max(error,(T)math::sqrt(math::pow(m_points(k,0)-results(k,0),2)+math::pow(m_points(k,1)-results(k,1),2)));
    }
}


template<class T>
void gsCurvatureSmoothing<T>::computeApproxErrorCoef(T & error)
{
    const gsMatrix<T> & coefs_original=m_curve_original->coefs(); // coefficients of the original curve
    const gsMatrix<T> & coefs_smooth=m_curve_smooth->coefs(); // coefficients of the smoothed curve

    error=0;

    // computing the maximal coefficient approximation error
    for(index_t k=0;k<coefs_original.rows();k++)
    {
        error= math::max(error, (coefs_original.row(k) - coefs_smooth.row(k)).norm() );
    }
}


template<class T>
void gsCurvatureSmoothing<T>::computeCurvatureError(T & error)
{

    const gsKnotVector<T> & m_knots=m_curve_smooth->knots(); // take the knots of the current smooth curve

    gsMatrix<T> & current_coefs=m_curve_smooth->coefs(); // the coefficients of the current smooth curve

    // the needed basis
    gsBSplineBasis<T> * basis = new gsBSplineBasis<T>(m_knots);

    error=0;

    // computation of the curvature error (objective function with omega1=0, omega2=1)
    compute_ObjectiveFunction(basis,&current_coefs,0,1,error);
    delete basis;
}

template<class T>
void gsCurvatureSmoothing<T>::compute_AllValues(gsBSplineBasis<T> * basis, gsMatrix<T> u, gsMatrix<T> *coefs, gsMatrix<T> & values0, gsMatrix<T> & values1, gsMatrix<T> & values2, gsMatrix<T> & values3)
{

    std::vector<gsMatrix<T> > m_results;
    gsMatrix<T> m_results1;
    gsMatrix<index_t> actives;
    basis->evalAllDers_into(u,3,m_results);
    basis->active_into(u,actives);

    // resize the output matrices to ensure they have the right size
    values0.resize(coefs->cols(),u.cols());
    values1.resize(coefs->cols(),u.cols());
    values2.resize(coefs->cols(),u.cols());
    values3.resize(coefs->cols(),u.cols());
    values0.setZero();
    values1.setZero();
    values2.setZero();
    values3.setZero();

    // number of active basis functions for one parameter value (i.e. degree + 1)
    index_t num=actives.rows();

    // computes the values and the derivatives at the parameter values for the given coefs
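    // Each column i holds the curve point and its first three derivatives at u(i):
    //     values_d.col(i) = sum_k coefs.row(active(k,i)) * (d-th derivative of B_k at u(i)),
    // i.e. only the (degree+1) nonzero basis functions contribute.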
    for(index_t i=0;i<u.cols();i++)
        for(index_t k=0;k<num;k++)
        {
            values0.col(i)+=coefs->row(actives(k,i))*m_results[0](k,i);
            values1.col(i)+=coefs->row(actives(k,i))*m_results[1](k,i);
            values2.col(i)+=coefs->row(actives(k,i))*m_results[2](k,i);
            values3.col(i)+=coefs->row(actives(k,i))*m_results[3](k,i);
        }

}


template<class T>
void gsCurvatureSmoothing<T>::compute_ObjectiveFunction(gsBSplineBasis<T> *basis, gsMatrix<T> *coefs, const T omega1, const T omega2, T & value)
{

    gsMatrix<T> m_values0;
    gsMatrix<T> m_values1;
    gsMatrix<T> m_values2;
    gsMatrix<T> m_values3;

    T objective1=0;
    T objective2=0;

    // computes the values and all derivatives (0th to 3rd)
    compute_AllValues(basis,m_param_values.transpose(),coefs,m_values0,m_values1,m_values2,m_values3);

    // numerical integration by the rectangle method, which coincides with the trapezoidal rule because the curve is closed
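    // objective1 accumulates the squared data misfit sum_i ||x(u_i) - p_i||^2, while the term
    // added to objective2 equals |kappa'(u_i)| for the planar curve x(u) = (x,y), with signed curvature
    //     kappa = (x'*y'' - y'*x'') / (x'^2 + y'^2)^(3/2),
    // so objective2 (after division by the number of samples below) approximates the total
    // variation of the curvature.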
    for(index_t i=0;i<m_param_values.rows();i++){
        objective1+=math::pow(m_values0(0,i)-m_points(i,0),2)+math::pow(m_values0(1,i)-m_points(i,1),2);

        objective2+= math::abs( 6.0*(m_values1(1,i)*m_values2(0,i) - m_values1(0,i)*m_values2(1,i))*(m_values1(0,i)*m_values2(0,i) + m_values1(1,i)*m_values2(1,i)) +
                     (T)(2)*( (math::pow(m_values1(0,i),2)+math::pow(m_values1(1,i),2)) * ((-1.0)*m_values1(1,i)*m_values3(0,i)+ m_values1(0,i)*m_values3(1,i)) ) )/
                     ((T)(2)*math::pow( math::pow(m_values1(0,i),2)+math::pow(m_values1(1,i),2) ,2.5) );
    }
    objective2=objective2/((T)(0.0+m_param_values.rows()));

    // the objective function
    value=omega1*objective1+omega2*objective2;
}



} // namespace gismo
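
A minimal usage sketch (illustrative only, not part of the file above): it assumes a gsCurvatureSmoothing<real_t> constructor taking the initial closed curve, the parameter values and the data points, in that order -- check gsCurvatureSmoothing.h for the exact declaration. The member functions and argument meanings are the ones defined in this file; the numeric values are arbitrary.

    // illustrative sketch; the constructor argument order is an assumption (see gsCurvatureSmoothing.h)
    gsCurvatureSmoothing<real_t> smoother(initial_curve, param_values, points);

    // total-variation smoothing: omega1, omega2, lamda (initial step size), tau (backtracking factor), iter
    smoother.smoothTotalVariation(1.0, 1e-3, 1e-4, 0.5, 25);

    real_t l2_error, curvature_error;
    smoother.computeApproxErrorL2(l2_error);          // L^2 fitting error against the point cloud
    smoother.computeCurvatureError(curvature_error);  // curvature term of the objective
    smoother.write(std::cout);                        // coefficients as a Mathematica-style list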