This commit is contained in:
Nicole Dresselhaus 2017-10-29 21:44:10 +01:00
parent 103b629b84
commit 18727a857d
Signed by: Drezil
GPG Key ID: 057D94F356F41E25
10 changed files with 462 additions and 453 deletions

View File

@ -69,7 +69,7 @@
Acmid = {2079770}, Acmid = {2079770},
Doi = {10.1007/11844297_36}, Doi = {10.1007/11844297_36},
ISBN = {3-540-38990-3, 978-3-540-38990-3}, ISBN = {978-3-540-38990-3},
Location = {Reykjavik, Iceland}, Location = {Reykjavik, Iceland},
Numpages = {10}, Numpages = {10},
Url = {http://dx.doi.org/10.1007/11844297_36} Url = {http://dx.doi.org/10.1007/11844297_36}
@ -86,13 +86,14 @@
Url = {https://www.researchgate.net/profile/Yaneer_Bar-Yam/publication/225104044_Complex_Engineered_Systems_A_New_Paradigm/links/59107f20a6fdccbfd57eb84d/Complex-Engineered-Systems-A-New-Paradigm.pdf} Url = {https://www.researchgate.net/profile/Yaneer_Bar-Yam/publication/225104044_Complex_Engineered_Systems_A_New_Paradigm/links/59107f20a6fdccbfd57eb84d/Complex-Engineered-Systems-A-New-Paradigm.pdf}
} }
@Article{anrichterEvol, @InProceedings{anrichterEvol,
Title = {Evolvability as a Quality Criterion for Linear Deformation Representations in Evolutionary Optimization}, Title = {Evolvability as a Quality Criterion for Linear Deformation Representations in Evolutionary Optimization},
Author = {Richter, Andreas and Achenbach, Jascha and Menzel, Stefan and Botsch, Mario}, Author = {Richter, Andreas and Achenbach, Jascha and Menzel, Stefan and Botsch, Mario},
Year = {2016}, Year = {2016},
Note = {\url{http://graphics.uni-bielefeld.de/publications/cec16.pdf}, \url{https://pub.uni-bielefeld.de/publication/2902698}}, Note = {\url{http://graphics.uni-bielefeld.de/publications/disclaimer.php?dlurl=cec16.pdf}, \url{https://pub.uni-bielefeld.de/publication/2902698}},
Booktitle = {IEEE Congress on Evolutionary Computation}, Booktitle = {IEEE Congress on Evolutionary Computation},
Pages = {901--910},
Location = {Vancouver, Canada}, Location = {Vancouver, Canada},
Publisher = {IEEE} Publisher = {IEEE}
} }
@ -100,12 +101,12 @@
@InProceedings{richter2015evolvability, @InProceedings{richter2015evolvability,
Title = {Evolvability of representations in complex system engineering: a survey}, Title = {Evolvability of representations in complex system engineering: a survey},
Author = {Richter, Andreas and Botsch, Mario and Menzel, Stefan}, Author = {Richter, Andreas and Botsch, Mario and Menzel, Stefan},
Booktitle = {Evolutionary Computation (CEC), 2015 IEEE Congress on}, Booktitle = {2015 IEEE Congress on Evolutionary Computation (CEC)},
Year = {2015}, Year = {2015},
Organization = {IEEE}, Organization = {IEEE},
Pages = {1327--1335}, Pages = {1327--1335},
Url = {http://www.graphics.uni-bielefeld.de/publications/cec15.pdf} Url = {http://www.graphics.uni-bielefeld.de/publications/disclaimer.php?dlurl=cec15.pdf}
} }
@InBook{Rothlauf2006, @InBook{Rothlauf2006,
@ -168,8 +169,6 @@
Year = {2012}, Year = {2012},
Number = {5}, Number = {5},
Volume = {27}, Volume = {27},
Url = {http://jcst.ict.ac.cn:8080/jcst/EN/article/downloadArticleFile.do?attachType=PDF\&id=9543}
} }
@article{giannelli2012thb, @article{giannelli2012thb,
title={THB-splines: The truncated basis for hierarchical splines}, title={THB-splines: The truncated basis for hierarchical splines},
@ -180,17 +179,17 @@
pages={485--498}, pages={485--498},
year={2012}, year={2012},
publisher={Elsevier}, publisher={Elsevier},
url={https://pdfs.semanticscholar.org/a858/aa68da617ad9d41de021f6807cc422002258.pdf}, note={\url{https://pdfs.semanticscholar.org/a858/aa68da617ad9d41de021f6807cc422002258.pdf}},
doi={10.1016/j.cagd.2012.03.025}, doi={10.1016/j.cagd.2012.03.025},
} }
@article{brunet2010contributions, @article{brunet2010contributions,
title={Contributions to parametric image registration and 3d surface reconstruction}, title={Contributions to parametric image registration and 3d surface reconstruction},
author={Brunet, Florent}, author={Brunet, Florent},
journal={European Ph. D. in Computer Vision, Universit{\'e} d'Auvergne, Clermont-Ferrand, France, and Technische Universitat Munchen, Germany}, journal={European Ph. D. in Computer Vision, Universit{\'e} d'Auvergne, Clermont-Ferrand, France, and Technische Universität München, Germany},
year={2010}, year={2010},
url={http://www.brnt.eu/phd/} url={http://www.brnt.eu/phd/}
} }
@article{aschenbach2015, @InProceedings{aschenbach2015,
author = {Achenbach, Jascha and Zell, Eduard and Botsch, Mario}, author = {Achenbach, Jascha and Zell, Eduard and Botsch, Mario},
booktitle = {Vision, Modeling \& Visualization}, booktitle = {Vision, Modeling \& Visualization},
journal = {Proceedings of Vision, Modeling and Visualization}, journal = {Proceedings of Vision, Modeling and Visualization},
@ -199,7 +198,7 @@
publisher = {Eurographics Association}, publisher = {Eurographics Association},
title = {Accurate Face Reconstruction through Anisotropic Fitting and Eye Correction}, title = {Accurate Face Reconstruction through Anisotropic Fitting and Eye Correction},
year = {2015}, year = {2015},
url = {http://graphics.uni-bielefeld.de/publications/disclaimer.php?dlurl=vmv15.pdf}, note = {\url{http://graphics.uni-bielefeld.de/publications/disclaimer.php?dlurl=vmv15.pdf}},
ISBN = {978-3-905674-95-8}, ISBN = {978-3-905674-95-8},
} }
@article{hauke2011comparison, @article{hauke2011comparison,
@ -247,7 +246,7 @@
publisher={IEEE}, publisher={IEEE},
url={https://www.researchgate.net/profile/Marc_Schoenauer/publication/223460374_Parameter_Control_in_Evolutionary_Algorithms/links/545766440cf26d5090a9b951.pdf}, url={https://www.researchgate.net/profile/Marc_Schoenauer/publication/223460374_Parameter_Control_in_Evolutionary_Algorithms/links/545766440cf26d5090a9b951.pdf},
} }
@article{rechenberg1973evolutionsstrategie, @book{rechenberg1973evolutionsstrategie,
title={Evolutionsstrategie Optimierung technischer Systeme nach Prinzipien der biologischen Evolution}, title={Evolutionsstrategie Optimierung technischer Systeme nach Prinzipien der biologischen Evolution},
author={Rechenberg, Ingo}, author={Rechenberg, Ingo},
year={1973}, year={1973},

View File

@ -44,22 +44,23 @@
\vspace*{\stretch{4}} \vspace*{\stretch{4}}
\begin{center} \begin{center}
\hspace{0.99cm} {\huge\bfseries Evaluation of the Performance\\[4mm] \hspace{1.5cm} {\huge\bfseries Evaluation of the Performance\\[-3mm]
\hspace{0.99cm} of Randomized FFD Control Grids}\\[28mm] \hspace{1.5cm} of Randomized\\[4mm]
\hspace{0.99cm} {\LARGE Master Thesis}\\[3mm] \hspace{1.5cm} FFD Control Grids}\\[25mm]
\hspace{0.99cm} {\Large {\normalsize at the}\\[4mm] \hspace{1.5cm} {\LARGE Master Thesis}\\
\hspace{0.99cm} AG Computer Graphics}\\[2mm] \hspace{1.5cm} {\Large {\normalsize at the}\\
\hspace{0.99cm} at the Faculty of Technology\\ \hspace{1.5cm} AG Computer Graphics}\\[2mm]
\hspace{0.99cm} of Bielefeld University\\[5mm] \hspace{1.5cm} at the Faculty of Technology\\
\hspace{0.99cm} {\Large by}\\[5mm] \hspace{1.5cm} of Bielefeld University\\[3mm]
\hspace{0.99cm} {\LARGE Stefan Dresselhaus}\\[8mm] \hspace{1.5cm} {\Large by}\\[3mm]
\hspace{0.99cm} {\large \today} \hspace{1.5cm} {\LARGE Stefan Dresselhaus}\\[5mm]
\hspace{1.5cm} {\large \today}
\end{center} \end{center}
\vspace*{\stretch{2}} \vspace*{\stretch{2}}
\begin{center} \begin{center}
\begin{tabular}{lrl} \begin{tabular}{lrl}
\hspace{0.99cm} Supervisor:~&Prof.~Dr.~&Mario Botsch\\ \hspace{1.5cm} Supervisor:~&Prof.~Dr.~&Mario Botsch\\[-5mm]
\hspace{0.99cm} &Dipl.~Math.~&Andreas~Richter \hspace{1.5cm} &Dipl.~Math.~&Andreas~Richter
\end{tabular} \end{tabular}
\end{center} \end{center}
\vspace*{\stretch{.2}} \vspace*{\stretch{.2}}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 55 KiB

After

Width:  |  Height:  |  Size: 59 KiB

View File

@ -15,7 +15,10 @@
id="svg2" id="svg2"
version="1.1" version="1.1"
inkscape:version="0.91 r13725" inkscape:version="0.91 r13725"
sodipodi:docname="enoughCP.svg"> sodipodi:docname="enoughCP.svg"
inkscape:export-filename="/home/sdressel/git/graphene/ausarbeitung/arbeit/img/enoughCP.png"
inkscape:export-xdpi="150"
inkscape:export-ydpi="150">
<defs <defs
id="defs4" /> id="defs4" />
<sodipodi:namedview <sodipodi:namedview
@ -26,7 +29,7 @@
inkscape:pageopacity="0.0" inkscape:pageopacity="0.0"
inkscape:pageshadow="2" inkscape:pageshadow="2"
inkscape:zoom="1.979899" inkscape:zoom="1.979899"
inkscape:cx="128.58458" inkscape:cx="127.54562"
inkscape:cy="179.18795" inkscape:cy="179.18795"
inkscape:document-units="px" inkscape:document-units="px"
inkscape:current-layer="layer1" inkscape:current-layer="layer1"
@ -39,9 +42,9 @@
fit-margin-right="0" fit-margin-right="0"
fit-margin-bottom="0" fit-margin-bottom="0"
inkscape:window-width="1920" inkscape:window-width="1920"
inkscape:window-height="1141" inkscape:window-height="1015"
inkscape:window-x="1680" inkscape:window-x="1920"
inkscape:window-y="0" inkscape:window-y="36"
inkscape:window-maximized="1" /> inkscape:window-maximized="1" />
<metadata <metadata
id="metadata7"> id="metadata7">
@ -51,7 +54,7 @@
<dc:format>image/svg+xml</dc:format> <dc:format>image/svg+xml</dc:format>
<dc:type <dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" /> rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title> <dc:title />
</cc:Work> </cc:Work>
</rdf:RDF> </rdf:RDF>
</metadata> </metadata>
@ -211,13 +214,13 @@
cy="578.05811" cy="578.05811"
r="5" /> r="5" />
<circle <circle
style="opacity:1;fill:#d4aa00;fill-opacity:1;stroke:#000000" style="opacity:1;fill:#ff0000;fill-opacity:1;stroke:#000000"
id="path4138-5-9" id="path4138-5-9"
cx="367.18387" cx="367.18387"
cy="578.05811" cy="578.05811"
r="5" /> r="5" />
<circle <circle
style="opacity:1;fill:#d4aa00;fill-opacity:1;stroke:#000000" style="opacity:1;fill:#ff0000;fill-opacity:1;stroke:#000000"
id="path4138-35-2" id="path4138-35-2"
cx="403.05746" cx="403.05746"
cy="578.05811" cy="578.05811"
@ -265,7 +268,7 @@
cy="612.91461" cy="612.91461"
r="5" /> r="5" />
<circle <circle
style="opacity:1;fill:#d4aa00;fill-opacity:1;stroke:#000000" style="opacity:1;fill:#ff0000;fill-opacity:1;stroke:#000000"
id="path4138-7-9" id="path4138-7-9"
cx="331.31027" cx="331.31027"
cy="612.91461" cy="612.91461"
@ -283,7 +286,7 @@
cy="612.91461" cy="612.91461"
r="5" /> r="5" />
<circle <circle
style="opacity:1;fill:#d4aa00;fill-opacity:1;stroke:#000000" style="opacity:1;fill:#ff0000;fill-opacity:1;stroke:#000000"
id="path4138-62-8" id="path4138-62-8"
cx="438.93106" cx="438.93106"
cy="612.91461" cy="612.91461"
@ -325,7 +328,7 @@
cy="647.77112" cy="647.77112"
r="5" /> r="5" />
<circle <circle
style="opacity:1;fill:#d4aa00;fill-opacity:1;stroke:#000000" style="opacity:1;fill:#ff0000;fill-opacity:1;stroke:#000000"
id="path4138-7-3" id="path4138-7-3"
cx="331.31027" cx="331.31027"
cy="647.77112" cy="647.77112"
@ -343,7 +346,7 @@
cy="647.77112" cy="647.77112"
r="5" /> r="5" />
<circle <circle
style="opacity:1;fill:#d4aa00;fill-opacity:1;stroke:#000000" style="opacity:1;fill:#ff0000;fill-opacity:1;stroke:#000000"
id="path4138-62-6" id="path4138-62-6"
cx="438.93106" cx="438.93106"
cy="647.77112" cy="647.77112"
@ -391,13 +394,13 @@
cy="682.62762" cy="682.62762"
r="5" /> r="5" />
<circle <circle
style="opacity:1;fill:#d4aa00;fill-opacity:1;stroke:#000000" style="opacity:1;fill:#ff0000;fill-opacity:1;stroke:#000000"
id="path4138-5-5" id="path4138-5-5"
cx="367.18387" cx="367.18387"
cy="682.62762" cy="682.62762"
r="5" /> r="5" />
<circle <circle
style="opacity:1;fill:#d4aa00;fill-opacity:1;stroke:#000000" style="opacity:1;fill:#ff0000;fill-opacity:1;stroke:#000000"
id="path4138-35-4" id="path4138-35-4"
cx="403.05746" cx="403.05746"
cy="682.62762" cy="682.62762"

Before

Width:  |  Height:  |  Size: 18 KiB

After

Width:  |  Height:  |  Size: 18 KiB

View File

@ -49,19 +49,19 @@ etc.), the translation of the problem--domain into a simple parametric
representation (the *genome*) can be challenging. representation (the *genome*) can be challenging.
This translation is often necessary as the target of the optimization may have This translation is often necessary as the target of the optimization may have
too many degrees of freedom. In the example of an aerodynamic simulation of drag too many degrees of freedom for a reasonable computation. In the example of an
onto an object, those object--designs tend to have a high number of vertices to aerodynamic simulation of drag onto an object, those object--designs tend to
adhere to various requirements (visual, practical, physical, etc.). A simpler have a high number of vertices to adhere to various requirements (visual,
representation of the same object in only a few parameters that manipulate the practical, physical, etc.). A simpler representation of the same object in only
whole in a sensible matter are desirable, as this often decreases the a few parameters that manipulate the whole in a sensible matter are desirable,
computation time significantly. as this often decreases the computation time significantly.
Additionally one can exploit the fact, that drag in this case is especially Additionally one can exploit the fact, that drag in this case is especially
sensitive to non--smooth surfaces, so that a smooth local manipulation of the sensitive to non--smooth surfaces, so that a smooth local manipulation of the
surface as a whole is more advantageous than merely random manipulation of the surface as a whole is more advantageous than merely random manipulation of the
vertices. vertices.
The quality of such a low-dimensional representation in biological evolution is The quality of such a low--dimensional representation in biological evolution is
strongly tied to the notion of *evolvability*\cite{wagner1996complex}, as the strongly tied to the notion of *evolvability*\cite{wagner1996complex}, as the
parametrization of the problem has serious implications on the convergence speed parametrization of the problem has serious implications on the convergence speed
and the quality of the solution\cite{Rothlauf2006}. and the quality of the solution\cite{Rothlauf2006}.
@ -80,14 +80,14 @@ One example of such a general representation of an object is to generate random
points and represent vertices of an object as distances to these points --- for points and represent vertices of an object as distances to these points --- for
example via \acf{RBF}. If one (or the algorithm) would move such a point the example via \acf{RBF}. If one (or the algorithm) would move such a point the
object will get deformed only locally (due to the \ac{RBF}). As this results in object will get deformed only locally (due to the \ac{RBF}). As this results in
a simple mapping from the parameter-space onto the object one can try out a simple mapping from the parameter--space onto the object one can try out
different representations of the same object and evaluate which criteria may be different representations of the same object and evaluate which criteria may be
suited to describe this notion of *evolvability*. This is exactly what Richter suited to describe this notion of *evolvability*. This is exactly what Richter
et al.\cite{anrichterEvol} have done. et al.\cite{anrichterEvol} have done.
As we transfer the results of Richter et al.\cite{anrichterEvol} from using As we transfer the results of Richter et al.\cite{anrichterEvol} from using
\acf{RBF} as a representation to manipulate geometric objects to the use of \acf{RBF} as a representation to manipulate geometric objects to the use of
\acf{FFD} we will use the same definition for evolvability the original author \acf{FFD} we will use the same definition for *evolvability* the original author
used, namely *regularity*, *variability*, and *improvement potential*. We used, namely *regularity*, *variability*, and *improvement potential*. We
introduce these terms in detail in Chapter \ref{sec:intro:rvi}. In the original introduce these terms in detail in Chapter \ref{sec:intro:rvi}. In the original
publication the author could show a correlation between these publication the author could show a correlation between these
@ -95,7 +95,7 @@ evolvability--criteria with the quality and convergence speed of such
optimization. optimization.
We will replicate the same setup on the same objects but use \acf{FFD} instead of We will replicate the same setup on the same objects but use \acf{FFD} instead of
\acf{RBF} to create a local deformation near the control points and evaluate if \acf{RBF} to create a local deformation near the control--points and evaluate if
the evolution--criteria still work as a predictor for *evolvability* of the the evolution--criteria still work as a predictor for *evolvability* of the
representation given the different deformation scheme, as suspected in representation given the different deformation scheme, as suspected in
\cite{anrichterEvol}. \cite{anrichterEvol}.
@ -106,8 +106,8 @@ take an abstract look at the definition of \ac{FFD} for a one--dimensional line
(in \ref{sec:back:ffdgood}). (in \ref{sec:back:ffdgood}).
Then we establish some background--knowledge of evolutionary algorithms (in Then we establish some background--knowledge of evolutionary algorithms (in
\ref{sec:back:evo}) and why this is useful in our domain (in \ref{sec:back:evo}) and why this is useful in our domain (in
\ref{sec:back:evogood}) followed by the definition of the different evolvability \ref{sec:back:evogood}) followed by the definition of the different
criteria established in \cite{anrichterEvol} (in \ref{sec:intro:rvi}). evolvability--criteria established in \cite{anrichterEvol} (in \ref{sec:intro:rvi}).
In Chapter \ref{sec:impl} we take a look at our implementation of \ac{FFD} and In Chapter \ref{sec:impl} we take a look at our implementation of \ac{FFD} and
the adaptation for 3D--meshes that were used. Next, in Chapter \ref{sec:eval}, the adaptation for 3D--meshes that were used. Next, in Chapter \ref{sec:eval},
@ -132,19 +132,20 @@ chapter \ref{3dffd}.
The main idea of \ac{FFD} is to create a function $s : [0,1[^d \mapsto The main idea of \ac{FFD} is to create a function $s : [0,1[^d \mapsto
\mathbb{R}^d$ that spans a certain part of a vector--space and is only linearly \mathbb{R}^d$ that spans a certain part of a vector--space and is only linearly
parametrized by some special control points $p_i$ and a constant parametrized by some special control--points $p_i$ and a constant
attribution--function $a_i(u)$, so attribution--function $a_i(u)$, so
$$ $$
s(\vec{u}) = \sum_i a_i(\vec{u}) \vec{p_i} s(\vec{u}) = \sum_i a_i(\vec{u}) \vec{p_i}
$$ $$
can be thought of as a representation of the inside of the convex hull generated by can be thought of as a representation of the inside of the convex hull generated by
the control points where each point can be accessed by the right $u \in [0,1[^d$. the control--points where each position inside can be accessed by the right
$u \in [0,1[^d$.
\begin{figure}[!ht] \begin{figure}[!ht]
\begin{center} \begin{center}
\includegraphics[width=0.7\textwidth]{img/B-Splines.png} \includegraphics[width=0.7\textwidth]{img/B-Splines.png}
\end{center} \end{center}
\caption[Example of B-Splines]{Example of a parametrization of a line with \caption[Example of B--Splines]{Example of a parametrization of a line with
corresponding deformation to generate a deformed object} corresponding deformation to generate a deformed object}
\label{fig:bspline} \label{fig:bspline}
\end{figure} \end{figure}
@ -184,7 +185,7 @@ $$\frac{\partial}{\partial u} N_{i,d,r}(u) = \frac{d}{\tau_{i+d} - \tau_i} N_{i,
For a B--Spline For a B--Spline
$$s(u) = \sum_{i} N_{i,d,\tau_i}(u) p_i$$ $$s(u) = \sum_{i} N_{i,d,\tau_i}(u) p_i$$
these derivations yield $\frac{\partial^d}{\partial u} s(u) = 0$. these derivations yield $\left(\frac{\partial}{\partial u}\right)^d s(u) = 0$.
Another interesting property of these recursive polynomials is that they are Another interesting property of these recursive polynomials is that they are
continuous (given $d \ge 1$) as every $p_i$ gets blended in between $\tau_i$ and continuous (given $d \ge 1$) as every $p_i$ gets blended in between $\tau_i$ and
@ -193,21 +194,21 @@ in every step of the recursion.
This means that all changes are only a local linear combination between the This means that all changes are only a local linear combination between the
control--point $p_i$ to $p_{i+d+1}$ and consequently this yields to the control--point $p_i$ to $p_{i+d+1}$ and consequently this yields to the
convex--hull--property of B-Splines --- meaning, that no matter how we choose convex--hull--property of B--Splines --- meaning, that no matter how we choose
our coefficients, the resulting points all have to lie inside the convex--hull of our coefficients, the resulting points all have to lie inside the convex--hull of
the control--points. the control--points.
For a given point $v_i$ we can then calculate the contributions For a given point $s_i$ we can then calculate the contributions
$n_{i,j}~:=~N_{j,d,\tau}$ of each control point $p_j$ to get the $u_{i,j}~:=~N_{j,d,\tau}$ of each control point $p_j$ to get the
projection from the control--point--space into the object--space: projection from the control--point--space into the object--space:
$$ $$
v_i = \sum_j n_{i,j} \cdot p_j = \vec{n}_i^{T} \vec{p} s_i = \sum_j u_{i,j} \cdot p_j = \vec{n}_i^{T} \vec{p}
$$ $$
or written for all points at the same time: or written for all points at the same time:
$$ $$
\vec{v} = \vec{N} \vec{p} \vec{s} = \vec{U} \vec{p}
$$ $$
where $\vec{N}$ is the $n \times m$ transformation--matrix (later on called where $\vec{U}$ is the $n \times m$ transformation--matrix (later on called
**deformation matrix**) for $n$ object--space--points and $m$ control--points. **deformation matrix**) for $n$ object--space--points and $m$ control--points.
\begin{figure}[ht] \begin{figure}[ht]
@ -220,7 +221,7 @@ of the B--spline ($[k_0,k_4]$ on this figure), the B--Spline basis functions sum
up to one (partition of unity). In this example, we use B--Splines of degree 2. up to one (partition of unity). In this example, we use B--Splines of degree 2.
The horizontal segment below the abscissa axis represents the domain of The horizontal segment below the abscissa axis represents the domain of
influence of the B--splines basis function, i.e. the interval on which they are influence of the B--splines basis function, i.e. the interval on which they are
not null. At a given point, there are at most $ d+1$ non-zero B--Spline basis not null. At a given point, there are at most $ d+1$ non--zero B--Spline basis
functions (compact support).\grqq \newline functions (compact support).\grqq \newline
Note, that Brunet starts his index at $-d$ opposed to our definition, where we Note, that Brunet starts his index at $-d$ opposed to our definition, where we
start at $0$.} start at $0$.}
@ -228,8 +229,8 @@ start at $0$.}
\end{figure} \end{figure}
Furthermore B--Spline--basis--functions form a partition of unity for all, but Furthermore B--Spline--basis--functions form a partition of unity for all, but
the first and last $d$ control-points\cite{brunet2010contributions}. Therefore the first and last $d$ control--points\cite{brunet2010contributions}. Therefore
we later on use the border-points $d+1$ times, such that $\sum_j n_{i,j} p_j = p_i$ we later on use the border--points $d+1$ times, such that $\sum_j u_{i,j} p_j = p_i$
for these points. for these points.
The locality of the influence of each control--point and the partition of unity The locality of the influence of each control--point and the partition of unity
@ -240,8 +241,8 @@ was beautifully pictured by Brunet, which we included here as figure
\label{sec:back:ffdgood} \label{sec:back:ffdgood}
The usage of \ac{FFD} as a tool for manipulating follows directly from the The usage of \ac{FFD} as a tool for manipulating follows directly from the
properties of the polynomials and the correspondence to the control points. properties of the polynomials and the correspondence to the control--points.
Having only a few control points gives the user a nicer high--level--interface, as Having only a few control--points gives the user a nicer high--level--interface, as
she only needs to move these points and the model follows in an intuitive she only needs to move these points and the model follows in an intuitive
manner. The deformation is smooth as the underlying polygon is smooth as well manner. The deformation is smooth as the underlying polygon is smooth as well
and affects as many vertices of the model as needed. Moreover the changes are and affects as many vertices of the model as needed. Moreover the changes are
@ -317,7 +318,7 @@ the *phenotypes* make certain behaviour observable (algorithmically through our
*fitness--function*, biologically by the ability to survive and produce *fitness--function*, biologically by the ability to survive and produce
offspring). Any individual in our algorithm thus experiences a biologically offspring). Any individual in our algorithm thus experiences a biologically
motivated life cycle of inheriting genes from the parents, modified by mutations motivated life cycle of inheriting genes from the parents, modified by mutations
occurring, performing according to a fitness--metric and generating offspring occurring, performing according to a fitness--metric, and generating offspring
based on this. Therefore each iteration in the while--loop above is also often based on this. Therefore each iteration in the while--loop above is also often
named generation. named generation.
@ -346,7 +347,7 @@ The main algorithm just repeats the following steps:
- **Selection** takes a selection--function $s : (I^\lambda \cup I^{\mu + \lambda},\Phi) \mapsto I^\mu$ that - **Selection** takes a selection--function $s : (I^\lambda \cup I^{\mu + \lambda},\Phi) \mapsto I^\mu$ that
selects from the previously generated $I^\lambda$ children and optionally also selects from the previously generated $I^\lambda$ children and optionally also
the parents (denoted by the set $Q$ in the algorithm) using the the parents (denoted by the set $Q$ in the algorithm) using the
fitness--function $\Phi$. The result of this operation is the next Population *fitness--function* $\Phi$. The result of this operation is the next Population
of $\mu$ individuals. of $\mu$ individuals.
All these functions can (and mostly do) have a lot of hidden parameters that All these functions can (and mostly do) have a lot of hidden parameters that
@ -370,7 +371,7 @@ also take ancestry, distance of genes or groups of individuals into account.
\label{sec:back:evogood} \label{sec:back:evogood}
The main advantage of evolutionary algorithms is the ability to find optima of The main advantage of evolutionary algorithms is the ability to find optima of
general functions just with the help of a given fitness--function. Components general functions just with the help of a given *fitness--function*. Components
and techniques for evolutionary algorithms are specifically known to and techniques for evolutionary algorithms are specifically known to
help with different problems arising in the domain of help with different problems arising in the domain of
optimization\cite{weise2012evolutionary}. An overview of the typical problems optimization\cite{weise2012evolutionary}. An overview of the typical problems
@ -383,13 +384,13 @@ are shown in figure \ref{fig:probhard}.
\end{figure} \end{figure}
Most of the advantages stem from the fact that a gradient--based procedure has Most of the advantages stem from the fact that a gradient--based procedure has
only one point of observation from where it evaluates the next steps, whereas an usually only one point of observation from where it evaluates the next steps,
evolutionary strategy starts with a population of guessed solutions. Because an whereas an evolutionary strategy starts with a population of guessed solutions.
evolutionary strategy can be modified according to the problem--domain (i.e. by Because an evolutionary strategy can be modified according to the
the ideas given above) it can also approximate very difficult problems in an problem--domain (i.e. by the ideas given above) it can also approximate very
efficient manner and even self--tune parameters depending on the ancestry at difficult problems in an efficient manner and even self--tune parameters
runtime^[Some examples of this are explained in detail in depending on the ancestry at runtime^[Some examples of this are explained in
\cite{eiben1999parameter}]. detail in \cite{eiben1999parameter}].
If an analytic best solution exists and is easily computable (i.e. because the If an analytic best solution exists and is easily computable (i.e. because the
error--function is convex) an evolutionary algorithm is not the right choice. error--function is convex) an evolutionary algorithm is not the right choice.
@ -421,23 +422,23 @@ coordinates
$$ $$
\Delta \vec{S} = \vec{U} \cdot \Delta \vec{P} \Delta \vec{S} = \vec{U} \cdot \Delta \vec{P}
$$ $$
which is isomorphic to the former due to the linear correlation in the which is isomorphic to the former due to the linearity of the deformation. One
deformation. One can see in this way, that the way the deformation behaves lies can see in this way, that the way the deformation behaves lies solely in the
solely in the entries of $\vec{U}$, which is why the three criteria focus on this. entries of $\vec{U}$, which is why the three criteria focus on this.
### Variability ### Variability
In \cite{anrichterEvol} *variability* is defined as In \cite{anrichterEvol} *variability* is defined as
$$\mathrm{variability}(\vec{U}) := \frac{\mathrm{rank}(\vec{U})}{n},$$ $$\mathrm{variability}(\vec{U}) := \frac{\mathrm{rank}(\vec{U})}{n},$$
whereby $\vec{U}$ is the $n \times m$ deformation--Matrix used to map the $m$ whereby $\vec{U}$ is the $n \times m$ deformation--Matrix used to map the $m$
control points onto the $n$ vertices. control--points onto the $n$ vertices.
Given $n = m$, an identical number of control--points and vertices, this Given $n = m$, an identical number of control--points and vertices, this
quotient will be $=1$ if all control points are independent of each other and quotient will be $=1$ if all control--points are independent of each other and
the solution is to trivially move every control--point onto a target--point. the solution is to trivially move every control--point onto a target--point.
In practice the value of $V(\vec{U})$ is typically $\ll 1$, because as In practice the value of $V(\vec{U})$ is typically $\ll 1$, because there are only
there are only few control--points for many vertices, so $m \ll n$. few control--points for many vertices, so $m \ll n$.
This criterion should correlate to the degrees of freedom the given This criterion should correlate to the degrees of freedom the given
parametrization has. This can be seen from the fact, that parametrization has. This can be seen from the fact, that
@ -459,7 +460,7 @@ value of the deformation--matrix $\vec{U}$.
As we deform the given Object only based on the parameters as $\vec{p} \mapsto As we deform the given Object only based on the parameters as $\vec{p} \mapsto
f(\vec{x} + \vec{U}\vec{p})$ this makes sure that $\|\vec{Up}\| \propto f(\vec{x} + \vec{U}\vec{p})$ this makes sure that $\|\vec{Up}\| \propto
\|\vec{p}\|$ when $\kappa(\vec{U}) \approx 1$. The inversion of $\kappa(\vec{U})$ \|\vec{p}\|$ when $\kappa(\vec{U}) \approx 1$. The inversion of $\kappa(\vec{U})$
is only performed to map the criterion--range to $[0..1]$, whereas $1$ is the is only performed to map the criterion--range to $[0..1]$, where $1$ is the
optimal value and $0$ is the worst value. optimal value and $0$ is the worst value.
On the one hand this criterion should be characteristic for numeric On the one hand this criterion should be characteristic for numeric
@ -470,7 +471,7 @@ to the notion of locality\cite{weise2012evolutionary,thorhauer2014locality}.
### Improvement Potential ### Improvement Potential
In contrast to the general nature of *variability* and *regularity*, which are In contrast to the general nature of *variability* and *regularity*, which are
agnostic of the fitness--function at hand, the third criterion should reflect a agnostic of the *fitness--function* at hand, the third criterion should reflect a
notion of the potential for optimization, taking a guess into account. notion of the potential for optimization, taking a guess into account.
Most of the times some kind of gradient $g$ is available to suggest a Most of the times some kind of gradient $g$ is available to suggest a
@ -509,7 +510,7 @@ As we have established in Chapter \ref{sec:back:ffd} we can define an
\Delta_x(u) = \sum_i N_{i,d,\tau_i}(u) \Delta_x c_i \Delta_x(u) = \sum_i N_{i,d,\tau_i}(u) \Delta_x c_i
\end{equation} \end{equation}
Note that we only sum up the $\Delta$--displacements in the control points $c_i$ to get Note that we only sum up the $\Delta$--displacements in the control--points $c_i$ to get
the change in position of the point we are interested in. the change in position of the point we are interested in.
In this way every deformed vertex is defined by In this way every deformed vertex is defined by
@ -539,8 +540,8 @@ and do a gradient--descend to approximate the value of $u$ up to an $\epsilon$ o
For this we employ the Gauss--Newton algorithm\cite{gaussNewton}, which For this we employ the Gauss--Newton algorithm\cite{gaussNewton}, which
converges into the least--squares solution. An exact solution of this problem is converges into the least--squares solution. An exact solution of this problem is
impossible most of the times, because we usually have way more vertices impossible most of the time, because we usually have way more vertices
than control points ($\#v~\gg~\#c$). than control--points ($\#v~\gg~\#c$).
## Adaption of \ac{FFD} for a 3D--Mesh ## Adaption of \ac{FFD} for a 3D--Mesh
\label{3dffd} \label{3dffd}
@ -550,7 +551,7 @@ chapter. But this time things get a bit more complicated. As we have a
3--dimensional grid we may have a different amount of control--points in each 3--dimensional grid we may have a different amount of control--points in each
direction. direction.
Given $n,m,o$ control points in $x,y,z$--direction each point on the curve is Given $n,m,o$ control--points in $x,y,z$--direction each point on the curve is
defined by defined by
$$V(u,v,w) = \sum_i \sum_j \sum_k N_{i,d,\tau_i}(u) N_{j,d,\tau_j}(v) N_{k,d,\tau_k}(w) \cdot C_{ijk}.$$ $$V(u,v,w) = \sum_i \sum_j \sum_k N_{i,d,\tau_i}(u) N_{j,d,\tau_j}(v) N_{k,d,\tau_k}(w) \cdot C_{ijk}.$$
@ -624,13 +625,13 @@ beneficial for a good behaviour of the evolutionary algorithm.
As mentioned in chapter \ref{sec:back:evo}, the way of choosing the As mentioned in chapter \ref{sec:back:evo}, the way of choosing the
representation to map the general problem (mesh--fitting/optimization in our representation to map the general problem (mesh--fitting/optimization in our
case) into a parameter-space is very important for the quality and runtime of case) into a parameter--space is very important for the quality and runtime of
evolutionary algorithms\cite{Rothlauf2006}. evolutionary algorithms\cite{Rothlauf2006}.
Because our control--points are arranged in a grid, we can accurately represent Because our control--points are arranged in a grid, we can accurately represent
each vertex--point inside the grids volume with proper B--Spline--coefficients each vertex--point inside the grids volume with proper B--Spline--coefficients
between $[0,1[$ and --- as a consequence --- we have to embed our object into it between $[0,1[$ and --- as a consequence --- we have to embed our object into it
(or create constant "dummy"-points outside). (or create constant "dummy"--points outside).
The great advantage of B--Splines is the local, direct impact of each The great advantage of B--Splines is the local, direct impact of each
control--point without having a $1:1$--correlation, and a smooth deformation. control--point without having a $1:1$--correlation, and a smooth deformation.
@ -651,20 +652,20 @@ control--points.}
One would normally think, that the more control--points you add, the better the One would normally think, that the more control--points you add, the better the
result will be, but this is not the case for our B--Splines. Given any point result will be, but this is not the case for our B--Splines. Given any point
$\vec{p}$ only the $2 \cdot (d-1)$ control--points contribute to the parametrization of $\vec{p}$ only the $2 \cdot (d-1)$ control--points contribute to the parametrization of
that point^[Normally these are $d-1$ to each side, but at the boundaries the that point^[Normally these are $d-1$ to each side, but at the boundaries border
number gets increased to the inside to meet the required smoothness]. points get used multiple times to meet the number of points required].
This means, that a high resolution can have many control-points that are not This means, that a high resolution can have many control--points that are not
contributing to any point on the surface and are thus completely irrelevant to contributing to any point on the surface and are thus completely irrelevant to
the solution. the solution.
We illustrate this phenomenon in figure \ref{fig:enoughCP}, where the four red We illustrate this phenomenon in figure \ref{fig:enoughCP}, where the red
central points are not relevant for the parametrization of the circle. This central points are not relevant for the parametrization of the circle. This
leads to artefacts in the deformation--matrix $\vec{U}$, as the columns leads to artefacts in the deformation--matrix $\vec{U}$, as the columns
corresponding to those control--points are $0$. corresponding to those control--points are $0$.
This leads to useless increased complexity, as the parameters corresponding to This also leads to useless increased complexity, as the parameters corresponding
those points will never have any effect, but a naive algorithm will still try to to those points will never have any effect, but a naive algorithm will still try
optimize them yielding numeric artefacts in the best and non--terminating or to optimize them yielding numeric artefacts in the best and non--terminating or
ill--defined solutions^[One example would be, when parts of an algorithm depend ill--defined solutions^[One example would be, when parts of an algorithm depend
on the inverse of the minimal right singular value leading to a division by $0$.] on the inverse of the minimal right singular value leading to a division by $0$.]
at worst. at worst.
@ -674,18 +675,18 @@ but this raises the question why they were introduced in the first place. We
will address this in a special scenario in \ref{sec:res:3d:var}. will address this in a special scenario in \ref{sec:res:3d:var}.
For our tests we chose different uniformly sized grids and added noise For our tests we chose different uniformly sized grids and added noise
onto each control-point^[For the special case of the outer layer we only applied onto each control--point^[For the special case of the outer layer we only applied
noise away from the object, so the object is still confined in the convex hull noise away from the object, so the object is still confined in the convex hull
of the control--points.] to simulate different starting-conditions. of the control--points.] to simulate different starting--conditions.
# Scenarios for testing evolvability criteria using \ac{FFD} # Scenarios for testing evolvability--criteria using \ac{FFD}
\label{sec:eval} \label{sec:eval}
In our experiments we use the same two testing--scenarios, that were also used In our experiments we use the same two testing--scenarios, that were also used
by \cite{anrichterEvol}. The first scenario deforms a plane into a shape by Richter et al.\cite{anrichterEvol}. The first scenario deforms a plane into a shape
originally defined in \cite{giannelli2012thb}, where we setup control-points in originally defined by Giannelli et al.\cite{giannelli2012thb}, where we setup
a 2--dimensional manner and merely deform in the height--coordinate to get the control--points in a 2--dimensional manner and merely deform in the
resulting shape. height--coordinate to get the resulting shape.
In the second scenario we increase the degrees of freedom significantly by using In the second scenario we increase the degrees of freedom significantly by using
a 3--dimensional control--grid to deform a sphere into a face, so each control a 3--dimensional control--grid to deform a sphere into a face, so each control
@ -717,7 +718,7 @@ including a wireframe--overlay of the vertices.}
\label{fig:1dtarget} \label{fig:1dtarget}
\end{figure} \end{figure}
As the starting-plane we used the same shape, but set all As the starting--plane we used the same shape, but set all
$z$--coordinates to $0$, yielding a flat plane, which is partially already $z$--coordinates to $0$, yielding a flat plane, which is partially already
correct. correct.
@ -728,10 +729,10 @@ of calculating the squared distances for each corresponding vertex
\end{equation} \end{equation}
where $t_i$ are the respective target--vertices to the parametrized where $t_i$ are the respective target--vertices to the parametrized
source--vertices^[The parametrization is encoded in $\vec{U}$ and the initial source--vertices^[The parametrization is encoded in $\vec{U}$ and the initial
position of the control points. See \ref{sec:ffd:adapt}] with the current position of the control--points. See \ref{sec:ffd:adapt}] with the current
deformation--parameters $\vec{p} = (p_1,\dots, p_m)$. We can do this deformation--parameters $\vec{p} = (p_1,\dots, p_m)$. We can do this
one--to--one--correspondence because we have exactly the same number of one--to--one--correspondence because we have exactly the same number of
source and target-vertices due to our setup of just flattening the object. source and target--vertices due to our setup of just flattening the object.
This formula is also the least--squares approximation error for which we This formula is also the least--squares approximation error for which we
can compute the analytic solution $\vec{p^{*}} = \vec{U^+}\vec{t}$, yielding us can compute the analytic solution $\vec{p^{*}} = \vec{U^+}\vec{t}$, yielding us
@ -762,16 +763,16 @@ these Models can be seen in figure \ref{fig:3dtarget}.
Opposed to the 1D--case we cannot map the source and target--vertices in a Opposed to the 1D--case we cannot map the source and target--vertices in a
one--to--one--correspondence, which we especially need for the approximation of one--to--one--correspondence, which we especially need for the approximation of
the fitting--error. Hence we state that the error of one vertex is the distance the fitting--error. Hence we state that the error of one vertex is the distance
to the closest vertex of the other model and sum up the error from the to the closest vertex of the respective other model and sum up the error from
respective source and target. the source and target.
We therefore define the *fitness--function* to be: We therefore define the *fitness--function* to be:
\begin{equation} \begin{equation}
\mathrm{f}(\vec{P}) = \frac{1}{n} \underbrace{\sum_{i=1}^n \|\vec{c_T(s_i)} - \mathrm{f}(\vec{P}) = \frac{1}{n} \underbrace{\sum_{i=1}^n \|\vec{c_T(s_i)} -
\vec{s_i}\|_2^2}_{\textrm{source-to-target--distance}} \vec{s_i}\|_2^2}_{\textrm{source--to--target--distance}}
+ \frac{1}{m} \underbrace{\sum_{i=1}^m \|\vec{c_S(t_i)} - + \frac{1}{m} \underbrace{\sum_{i=1}^m \|\vec{c_S(t_i)} -
\vec{t_i}\|_2^2}_{\textrm{target-to-source--distance}} \vec{t_i}\|_2^2}_{\textrm{target--to--source--distance}}
+ \lambda \cdot \textrm{regularization}(\vec{P}) + \lambda \cdot \textrm{regularization}(\vec{P})
\label{eq:fit3d} \label{eq:fit3d}
\end{equation} \end{equation}
@ -787,7 +788,7 @@ $n \times m$--matrix of calculated coefficients for the \ac{FFD} --- analog to
the 1D case --- and finally $\vec{P}$ being the $m \times 3$--matrix of the the 1D case --- and finally $\vec{P}$ being the $m \times 3$--matrix of the
control--grid defining the whole deformation. control--grid defining the whole deformation.
As regularization-term we add a weighted Laplacian of the deformation that has As regularization--term we add a weighted Laplacian of the deformation that has
been used before by Aschenbach et al.\cite[Section 3.2]{aschenbach2015} on been used before by Aschenbach et al.\cite[Section 3.2]{aschenbach2015} on
similar models and was shown to lead to a more precise fit. The Laplacian similar models and was shown to lead to a more precise fit. The Laplacian
\begin{equation} \begin{equation}
@ -812,7 +813,7 @@ ill--defined grids mentioned in section \ref{sec:impl:grid}.
To compare our results to the ones given by Richter et al.\cite{anrichterEvol}, To compare our results to the ones given by Richter et al.\cite{anrichterEvol},
we also use Spearman's rank correlation coefficient. Opposed to other popular we also use Spearman's rank correlation coefficient. Opposed to other popular
coefficients, like the Pearson correlation coefficient, which measures a linear coefficients, like the Pearson correlation coefficient, which measures a linear
relationship between variables, the Spearmans's coefficient assesses \glqq how relationship between variables, the Spearman's coefficient assesses \glqq how
well an arbitrary monotonic function can describe the relationship between two well an arbitrary monotonic function can describe the relationship between two
variables, without making any assumptions about the frequency distribution of variables, without making any assumptions about the frequency distribution of
the variables\grqq\cite{hauke2011comparison}. the variables\grqq\cite{hauke2011comparison}.
@ -846,18 +847,19 @@ well. We leave the parameters at their sensible defaults as further explained in
\label{sec:proc:1d} \label{sec:proc:1d}
For our setup we first compute the coefficients of the deformation--matrix and For our setup we first compute the coefficients of the deformation--matrix and
use then the formulas for *variability* and *regularity* to get our predictions. use the formulas for *variability* and *regularity* to get our predictions.
Afterwards we solve the problem analytically to get the (normalized) correct Afterwards we solve the problem analytically to get the (normalized) correct
gradient that we use as guess for the *improvement potential*. To check we also gradient that we use as guess for the *improvement potential*. To further test
consider a distorted gradient $\vec{g}_{\mathrm{d}}$ the *improvement potential* we also consider a distorted gradient
$\vec{g}_{\mathrm{d}}$:
$$ $$
\vec{g}_{\mathrm{d}} = \frac{\mu \vec{g}_{\mathrm{c}} + (1-\mu)\mathbb{1}}{\|\mu \vec{g}_{\mathrm{c}} + (1-\mu) \mathbb{1}\|} \vec{g}_{\mathrm{d}} = \frac{\mu \vec{g}_{\mathrm{c}} + (1-\mu)\mathbb{1}}{\|\mu \vec{g}_{\mathrm{c}} + (1-\mu) \mathbb{1}\|}
$$ $$
where $\mathbb{1}$ is the vector consisting of $1$ in every dimension, where $\mathbb{1}$ is the vector consisting of $1$ in every dimension,
$\vec{g}_\mathrm{c} = \vec{p^{*}} - \vec{p}$ is the calculated correct gradient, $\vec{g}_\mathrm{c} = \vec{p^{*}} - \vec{p}$ is the calculated correct gradient,
and $\mu$ is used to blend between $\vec{g}_\mathrm{c}$ and $\mathbb{1}$. As and $\mu$ is used to blend between $\vec{g}_\mathrm{c}$ and $\mathbb{1}$. As
we always start with a gradient of $p = \mathbb{0}$ this means shortens we always start with a gradient of $p = \mathbb{0}$ this means we can shorten
$\vec{g}_\mathrm{c} = \vec{p^{*}}$. the definition of $\vec{g}_\mathrm{c}$ to $\vec{g}_\mathrm{c} = \vec{p^{*}}$.
\begin{figure}[ht] \begin{figure}[ht]
\begin{center} \begin{center}
@ -870,9 +872,9 @@ random distortion to generate a testcase.}
We then set up a regular 2--dimensional grid around the object with the desired We then set up a regular 2--dimensional grid around the object with the desired
grid resolutions. To generate a testcase we then move the grid--vertices grid resolutions. To generate a testcase we then move the grid--vertices
randomly inside the x--y--plane. As self-intersecting grids get tricky to solve randomly inside the x--y--plane. As self--intersecting grids get tricky to solve
with our implemented Newton's method we avoid the generation of such with our implemented Newton's method (see section \ref{3dffd}) we avoid the
self--intersecting grids for our testcases (see section \ref{3dffd}). generation of such self--intersecting grids for our testcases.
To achieve that we generated a Gaussian distributed number with $\mu = 0, \sigma=0.25$ To achieve that we generated a Gaussian distributed number with $\mu = 0, \sigma=0.25$
and clamped it to the range $[-0.25,0.25]$. We chose such an $r \in [-0.25,0.25]$ and clamped it to the range $[-0.25,0.25]$. We chose such an $r \in [-0.25,0.25]$
@ -899,11 +901,11 @@ In the case of our 1D--Optimization--problem, we have the luxury of knowing the
analytical solution to the given problem--set. We use this to experimentally analytical solution to the given problem--set. We use this to experimentally
evaluate the quality criteria we introduced before. As an evolutionary evaluate the quality criteria we introduced before. As an evolutionary
optimization is partially a random process, we use the analytical solution as a optimization is partially a random process, we use the analytical solution as a
stopping-criterion. We measure the convergence speed as the number of iterations the stopping--criterion. We measure the convergence speed as the number of iterations the
evolutionary algorithm needed to get within $1.05 \times$ of the optimal solution. evolutionary algorithm needed to get within $1.05 \times$ of the optimal solution.
We used different regular grids that we manipulated as explained in Section We used different regular grids that we manipulated as explained in Section
\ref{sec:proc:1d} with a different number of control points. As our grids have \ref{sec:proc:1d} with a different number of control--points. As our grids have
to be the product of two integers, we compared a $5 \times 5$--grid with $25$ to be the product of two integers, we compared a $5 \times 5$--grid with $25$
control--points to a $4 \times 7$ and $7 \times 4$--grid with $28$ control--points to a $4 \times 7$ and $7 \times 4$--grid with $28$
control--points. This was done to measure the impact an \glqq improper\grqq \ control--points. This was done to measure the impact an \glqq improper\grqq \
@ -924,7 +926,7 @@ Note that $7 \times 4$ and $4 \times 7$ have the same number of control--points.
\label{fig:1dvar} \label{fig:1dvar}
\end{figure} \end{figure}
Variability should characterize the potential for design space exploration and *Variability* should characterize the potential for design space exploration and
is defined in terms of the normalized rank of the deformation matrix $\vec{U}$: is defined in terms of the normalized rank of the deformation matrix $\vec{U}$:
$V(\vec{U}) := \frac{\textrm{rank}(\vec{U})}{n}$, whereby $n$ is the number of $V(\vec{U}) := \frac{\textrm{rank}(\vec{U})}{n}$, whereby $n$ is the number of
vertices. vertices.
@ -933,27 +935,27 @@ grid), we have merely plotted the errors in the box plot in figure
\ref{fig:1dvar}. \ref{fig:1dvar}.
It is also noticeable, that although the $7 \times 4$ and $4 \times 7$ grids It is also noticeable, that although the $7 \times 4$ and $4 \times 7$ grids
have a higher variability, they do not perform better than the $5 \times 5$ grid. have a higher *variability*, they do not perform better than the $5 \times 5$ grid.
Also the $7 \times 4$ and $4 \times 7$ grids differ distinctly from each other Also the $7 \times 4$ and $4 \times 7$ grids differ distinctly from each other
with a mean$\pm$sigma of $233.09 \pm 12.32$ for the former and $286.32 \pm 22.36$ for the with a mean$\pm$sigma of $233.09 \pm 12.32$ for the former and $286.32 \pm 22.36$ for the
latter, although they have the same number of control--points. This is an latter, although they have the same number of control--points. This is an
indication of an impact a proper or improper grid--setup can have. We do not indication of an impact a proper or improper grid--setup can have. We do not
draw scientific conclusions from these findings, as more research on non-squared draw scientific conclusions from these findings, as more research on non--squared
grids seems necessary. grids seems necessary.
Leaving the issue of the grid--layout aside we focused on grids having the same Leaving the issue of the grid--layout aside we focused on grids having the same
number of prototypes in every dimension. For the $5 \times 5$, $7 \times 7$ and number of prototypes in every dimension. For the $5 \times 5$, $7 \times 7$ and
$10 \times 10$ grids we found a *very strong* correlation ($-r_S = 0.94, p = 0$) $10 \times 10$ grids we found a *very strong* correlation ($-r_S = 0.94, p = 0$)
between the variability and the evolutionary error. between the *variability* and the evolutionary error.
### Regularity ### Regularity
\begin{figure}[tbh] \begin{figure}[tbh]
\centering \centering
\includegraphics[width=\textwidth]{img/evolution1d/55_to_1010_steps.png} \includegraphics[width=\textwidth]{img/evolution1d/55_to_1010_steps.png}
\caption[Improvement potential and regularity vs. steps]{\newline \caption[Improvement potential and regularity against iterations]{\newline
Left: Improvement potential against steps until convergence\newline Left: *Improvement potential* against number of iterations until convergence\newline
Right: Regularity against steps until convergence\newline Right: *Regularity* against number of iterations until convergence\newline
Coloured by their grid--resolution, both with a linear fit over the whole Coloured by their grid--resolution, both with a linear fit over the whole
dataset.} dataset.}
\label{fig:1dreg} \label{fig:1dreg}
@ -966,15 +968,15 @@ $5 \times 5$ & $7 \times 4$ & $4 \times 7$ & $7 \times 7$ & $10 \times 10$\\
\hline \hline
$0.28$ ($0.0045$) & \textcolor{red}{$0.21$} ($0.0396$) & \textcolor{red}{$0.1$} ($0.3019$) & \textcolor{red}{$0.01$} ($0.9216$) & \textcolor{red}{$0.01$} ($0.9185$) $0.28$ ($0.0045$) & \textcolor{red}{$0.21$} ($0.0396$) & \textcolor{red}{$0.1$} ($0.3019$) & \textcolor{red}{$0.01$} ($0.9216$) & \textcolor{red}{$0.01$} ($0.9185$)
\end{tabular} \end{tabular}
\caption[Correlation 1D Regularity/Steps]{Spearman's correlation (and p-values) \caption[Correlation 1D *regularity* against iterations]{Inverted Spearman's correlation (and p--values)
between regularity and convergence speed for the 1D function approximation between *regularity* and number of iterations for the 1D function approximation
problem. problem.
\newline Note: Not significant results are marked in \textcolor{red}{red}. \newline Note: Not significant results are marked in \textcolor{red}{red}.
} }
\label{tab:1dreg} \label{tab:1dreg}
\end{table} \end{table}
Regularity should correspond to the convergence speed (measured in *Regularity* should correspond to the convergence speed (measured in
iteration--steps of the evolutionary algorithm), and is computed as inverse iteration--steps of the evolutionary algorithm), and is computed as inverse
condition number $\kappa(\vec{U})$ of the deformation--matrix. condition number $\kappa(\vec{U})$ of the deformation--matrix.
@ -986,9 +988,9 @@ correlation of $- r_S = -0.72, p = 0$, that is opposed to our expectations.
To explain this discrepancy we took a closer look at what caused this high number To explain this discrepancy we took a closer look at what caused this high number
of iterations. In figure \ref{fig:1dreg} we also plotted the of iterations. In figure \ref{fig:1dreg} we also plotted the
improvement-potential against the steps next to the regularity--plot. Our theory *improvement potential* against the steps next to the *regularity*--plot. Our theory
is that the *very strong* correlation ($-r_S = -0.82, p=0$) between is that the *very strong* correlation ($-r_S = -0.82, p=0$) between
improvement--potential and number of iterations hints that the employed *improvement potential* and number of iterations hints that the employed
algorithm simply takes longer to converge on a better solution (as seen in algorithm simply takes longer to converge on a better solution (as seen in
figure \ref{fig:1dvar} and \ref{fig:1dimp}) offsetting any gain the figure \ref{fig:1dvar} and \ref{fig:1dimp}) offsetting any gain the
regularity--measurement could achieve. regularity--measurement could achieve.
@ -998,14 +1000,14 @@ regularity--measurement could achieve.
\begin{figure}[ht] \begin{figure}[ht]
\centering \centering
\includegraphics[width=0.8\textwidth]{img/evolution1d/55_to_1010_improvement-vs-evo-error.png} \includegraphics[width=0.8\textwidth]{img/evolution1d/55_to_1010_improvement-vs-evo-error.png}
\caption[Correlation 1D Improvement vs. Error]{Improvement potential plotted \caption[Correlation 1D Improvement vs. Error]{*Improvement potential* plotted
against the error yielded by the evolutionary optimization for different against the error yielded by the evolutionary optimization for different
grid--resolutions} grid--resolutions}
\label{fig:1dimp} \label{fig:1dimp}
\end{figure} \end{figure}
The improvement potential should correlate to the quality of the The *improvement potential* should correlate to the quality of the
fitting--result. We plotted the results for the tested grid-sizes $5 \times 5$, fitting--result. We plotted the results for the tested grid--sizes $5 \times 5$,
$7 \times 7$ and $10 \times 10$ in figure \ref{fig:1dimp}. We tested the $7 \times 7$ and $10 \times 10$ in figure \ref{fig:1dimp}. We tested the
$4 \times 7$ and $7 \times 4$ grids as well, but omitted them from the plot. $4 \times 7$ and $7 \times 4$ grids as well, but omitted them from the plot.
@ -1035,7 +1037,7 @@ Initially we set up the correspondences $\vec{c_T(\dots)}$ and $\vec{c_S(\dots)}
the respectively closest vertices of the other model. We then calculate the the respectively closest vertices of the other model. We then calculate the
analytical solution given these correspondences via $\vec{P^{*}} = \vec{U^+}\vec{T}$, analytical solution given these correspondences via $\vec{P^{*}} = \vec{U^+}\vec{T}$,
and also use the first solution as guessed gradient for the calculation of the and also use the first solution as guessed gradient for the calculation of the
*improvement--potential*, as the optimal solution is not known. *improvement potential*, as the optimal solution is not known.
We then let the evolutionary algorithm run up to within $1.05$ times the error of We then let the evolutionary algorithm run up to within $1.05$ times the error of
this solution and afterwards recalculate the correspondences $\vec{c_T(\dots)}$ this solution and afterwards recalculate the correspondences $\vec{c_T(\dots)}$
and $\vec{c_S(\dots)}$. and $\vec{c_S(\dots)}$.
@ -1063,11 +1065,11 @@ iterations until the regularization--effect wears off.
The grid we use for our experiments is just very coarse due to computational The grid we use for our experiments is just very coarse due to computational
limitations. We are not interested in a good reconstruction, but an estimate of whether limitations. We are not interested in a good reconstruction, but an estimate of whether
the mentioned evolvability criteria are good. the mentioned evolvability--criteria are good.
In figure \ref{fig:setup3d} we show an example setup of the scene with a In figure \ref{fig:setup3d} we show an example setup of the scene with a
$4\times 4\times 4$--grid. Identical to the 1--dimensional scenario before, we create a $4\times 4\times 4$--grid. Identical to the 1--dimensional scenario before, we create a
regular grid and move the control-points in the exact same random manner between regular grid and move the control--points in the exact same random manner between
their neighbours as described in section \ref{sec:proc:1d}, but in three instead their neighbours as described in section \ref{sec:proc:1d}, but in three instead
of two dimensions^[Again, we flip the signs for the edges, if necessary to have of two dimensions^[Again, we flip the signs for the edges, if necessary to have
the object still in the convex hull.]. the object still in the convex hull.].
@ -1083,16 +1085,16 @@ Right: A $4 \times 4 \times 7$ grid that we expect to perform worse.}
As is clearly visible from figure \ref{fig:3dgridres}, the target--model has many As is clearly visible from figure \ref{fig:3dgridres}, the target--model has many
vertices in the facial area, at the ears and in the neck--region. Therefore we vertices in the facial area, at the ears and in the neck--region. Therefore we
chose to increase the grid-resolutions for our tests in two different dimensions chose to increase the grid--resolutions for our tests in two different dimensions
and see how well the criteria predict a suboptimal placement of these and see how well the criteria predict a suboptimal placement of these
control-points. control--points.
## Results of 3D Function Approximation ## Results of 3D Function Approximation
In the 3D--Approximation we tried to evaluate further on the impact of the In the 3D--Approximation we tried to evaluate further on the impact of the
grid--layout to the overall criteria. As the target--model has many vertices in grid--layout to the overall criteria. As the target--model has many vertices in
concentrated in the facial area we start from a $4 \times 4 \times 4$ grid and concentrated in the facial area we start from a $4 \times 4 \times 4$ grid and
only increase the number of control points in one dimension, yielding a only increase the number of control--points in one dimension, yielding a
resolution of $7 \times 4 \times 4$ and $4 \times 4 \times 7$ respectively. We resolution of $7 \times 4 \times 4$ and $4 \times 4 \times 7$ respectively. We
visualized those two grids in figure \ref{fig:3dgridres}. visualized those two grids in figure \ref{fig:3dgridres}.
@ -1121,10 +1123,10 @@ $4 \times 4 \times \mathrm{X}$ & $\mathrm{X} \times 4 \times 4$ & $\mathrm{Y} \t
\hline \hline
0.89 (0) & 0.9 (0) & 0.91 (0) & 0.94 (0) 0.89 (0) & 0.9 (0) & 0.91 (0) & 0.94 (0)
\end{tabular} \end{tabular}
\caption[Correlation between variability and fitting error for 3D]{Correlation \caption[Correlation between *variability* and fitting error for 3D]{Correlation
between variability and fitting error for the 3D fitting scenario.\newline between *variability* and fitting error for the 3D fitting scenario.\newline
Displayed are the negated Spearman coefficients with the corresponding p-values Displayed are the negated Spearman coefficients with the corresponding p--values
in brackets for three cases of increasing variability ($\mathrm{X} \in [4,5,7], in brackets for three cases of increasing *variability* ($\mathrm{X} \in [4,5,7],
\mathrm{Y} \in [4,5,6]$). \mathrm{Y} \in [4,5,6]$).
\newline Note: Not significant results are marked in \textcolor{red}{red}.} \newline Note: Not significant results are marked in \textcolor{red}{red}.}
\label{tab:3dvar} \label{tab:3dvar}
@ -1143,37 +1145,37 @@ Interestingly both variants end up closer in terms of fitting error than we
anticipated, which shows that the evolutionary algorithm we employed is capable anticipated, which shows that the evolutionary algorithm we employed is capable
of correcting a purposefully created \glqq bad\grqq \ grid. Also this confirms, of correcting a purposefully created \glqq bad\grqq \ grid. Also this confirms,
that in our cases the number of control--points is more important for quality that in our cases the number of control--points is more important for quality
than their placement, which is captured by the variability via the rank of the than their placement, which is captured by the *variability* via the rank of the
deformation--matrix. deformation--matrix.
Overall the correlation between *variability* and fitness--error was
*significant* and showed a *very strong* correlation in all our tests.
The detailed correlation--coefficients are given in table \ref{tab:3dvar}
alongside their p--values.
As introduced in section \ref{sec:impl:grid} and visualized in figure
\ref{fig:enoughCP}, we know, that not all control--points have to necessarily
contribute to the parametrization of our 3D--model. Because we are starting from
a sphere, some control--points are too far away from the surface to contribute
to the deformation at all.
One can already see in 2D in figure \ref{fig:enoughCP}, that this effect
starts with a regular $9 \times 9$ grid on a perfect circle. To make sure we
observe this, we evaluated the *variability* for 100 randomly moved $10 \times 10 \times 10$
grids on the sphere we start out with.
\begin{figure}[hbt] \begin{figure}[hbt]
\centering \centering
\includegraphics[width=0.8\textwidth]{img/evolution3d/variability2_boxplot.png} \includegraphics[width=0.8\textwidth]{img/evolution3d/variability2_boxplot.png}
\caption[Histogram of ranks of high--resolution deformation--matrices]{ \caption[Histogram of ranks of high--resolution deformation--matrices]{
Histogram of ranks of various $10 \times 10 \times 10$ grids with $1000$ Histogram of ranks of various $10 \times 10 \times 10$ grids with $1000$
control--points each showing in this case how many control points are actually control--points each showing in this case how many control--points are actually
used in the calculations. used in the calculations.
} }
\label{fig:histrank3d} \label{fig:histrank3d}
\end{figure} \end{figure}
Overall the correlation between variability and fitness--error was As the *variability* is defined by $\frac{\mathrm{rank}(\vec{U})}{n}$ we can
*significant* and showed a *very strong* correlation in all our tests.
The detailed correlation--coefficients are given in table \ref{tab:3dvar}
alongside their p--values.
As introduced in section \ref{sec:impl:grid} and visualized in figure
\ref{fig:enoughCP}, we know, that not all control points have to necessarily
contribute to the parametrization of our 3D--model. Because we are starting from
a sphere, some control-points are too far away from the surface to contribute
to the deformation at all.
One can already see in 2D in figure \ref{fig:enoughCP}, that this effect
starts with a regular $9 \times 9$ grid on a perfect circle. To make sure we
observe this, we evaluated the variability for 100 randomly moved $10 \times 10 \times 10$
grids on the sphere we start out with.
As the variability is defined by $\frac{\mathrm{rank}(\vec{U})}{n}$ we can
easily recover the rank of the deformation--matrix $\vec{U}$. The results are easily recover the rank of the deformation--matrix $\vec{U}$. The results are
shown in the histogram in figure \ref{fig:histrank3d}. Especially in the centre shown in the histogram in figure \ref{fig:histrank3d}. Especially in the centre
of the sphere and in the corners of our grid we effectively lose of the sphere and in the corners of our grid we effectively lose
@ -1184,7 +1186,7 @@ to use and one should expect a loss in quality evident by a higher
reconstruction--error opposed to a grid where they are used. Sadly we could not reconstruction--error opposed to a grid where they are used. Sadly we could not
run an in--depth test on this due to computational limitations. run an in--depth test on this due to computational limitations.
Nevertheless this hints at the notion, that variability is a good measure for Nevertheless this hints at the notion, that *variability* is a good measure for
the overall quality of a fit. the overall quality of a fit.
### Regularity ### Regularity
@ -1212,19 +1214,19 @@ $4 \times 4 \times 4$ & $4 \times 4 \times 5$ & $4 \times 4 \times 7$ & $4 \time
\cline{2-4} \cline{2-4}
\multicolumn{3}{c}{} & all: 0.15 (0) \T \multicolumn{3}{c}{} & all: 0.15 (0) \T
\end{tabular} \end{tabular}
\caption[Correlation between regularity and iterations for 3D]{Correlation \caption[Correlation between *regularity* and iterations for 3D]{Correlation
between regularity and number of iterations for the 3D fitting scenario. between *regularity* and number of iterations for the 3D fitting scenario.
Displayed are the negated Spearman coefficients with the corresponding p--values Displayed are the negated Spearman coefficients with the corresponding p--values
in brackets for various given grids ($\mathrm{X} \in [4,5,7], \mathrm{Y} \in [4,5,6]$). in brackets for various given grids ($\mathrm{X} \in [4,5,7], \mathrm{Y} \in [4,5,6]$).
\newline Note: Not significant results are marked in \textcolor{red}{red}.} \newline Note: Not significant results are marked in \textcolor{red}{red}.}
\label{tab:3dreg} \label{tab:3dreg}
\end{table} \end{table}
Opposed to the predictions of variability our test on regularity gave a mixed Opposed to the predictions of *variability* our test on *regularity* gave a mixed
result --- similar to the 1D--case. result --- similar to the 1D--case.
In roughly half of the scenarios we have a *significant*, but *weak* to *moderate* In roughly half of the scenarios we have a *significant*, but *weak* to *moderate*
correlation between regularity and number of iterations. On the other hand in correlation between *regularity* and number of iterations. On the other hand in
the scenarios where we increased the number of control--points, namely $125$ for the scenarios where we increased the number of control--points, namely $125$ for
the $5 \times 5 \times 5$ grid and $216$ for the $6 \times 6 \times 6$ grid we found the $5 \times 5 \times 5$ grid and $216$ for the $6 \times 6 \times 6$ grid we found
a *significant*, but *weak* **anti**--correlation when taking all three tests into a *significant*, but *weak* **anti**--correlation when taking all three tests into
@ -1233,14 +1235,14 @@ findings/trends for the sets with $64$, $80$, and $112$ control--points
(first two rows of table \ref{tab:3dreg}). (first two rows of table \ref{tab:3dreg}).
Taking all results together we only find a *very weak*, but *significant* link Taking all results together we only find a *very weak*, but *significant* link
between regularity and the number of iterations needed for the algorithm to between *regularity* and the number of iterations needed for the algorithm to
converge. converge.
\begin{figure}[!htb] \begin{figure}[!htb]
\centering \centering
\includegraphics[width=\textwidth]{img/evolution3d/regularity_montage.png} \includegraphics[width=\textwidth]{img/evolution3d/regularity_montage.png}
\caption[Regularity for different 3D--grids]{ \caption[Regularity for different 3D--grids]{
Plots of regularity against number of iterations for various scenarios together Plots of *regularity* against number of iterations for various scenarios together
with a linear fit to indicate trends.} with a linear fit to indicate trends.}
\label{fig:resreg3d} \label{fig:resreg3d}
\end{figure} \end{figure}
@ -1250,10 +1252,10 @@ the number of control--points helps the convergence--speeds. The
regularity--criterion first behaves as we would like to, but then switches to regularity--criterion first behaves as we would like to, but then switches to
behave exactly opposite to our expectations, as can be seen in the first three behave exactly opposite to our expectations, as can be seen in the first three
plots. While the number of control--points increases from red to green to blue plots. While the number of control--points increases from red to green to blue
and the number of iterations decreases, the regularity seems to increase at and the number of iterations decreases, the *regularity* seems to increase at
first, but then decreases again on higher grid--resolutions. first, but then decreases again on higher grid--resolutions.
This can be an artefact of the definition of regularity, as it is defined by the This can be an artefact of the definition of *regularity*, as it is defined by the
inverse condition--number of the deformation--matrix $\vec{U}$, being the inverse condition--number of the deformation--matrix $\vec{U}$, being the
fraction $\frac{\sigma_{\mathrm{min}}}{\sigma_{\mathrm{max}}}$ between the fraction $\frac{\sigma_{\mathrm{min}}}{\sigma_{\mathrm{max}}}$ between the
least and greatest right singular value. least and greatest right singular value.
@ -1264,9 +1266,9 @@ and so a small minimal right singular value occurring on higher
grid--resolutions seems likely the problem. grid--resolutions seems likely the problem.
Adding to this we also noted, that in the case of the $10 \times 10 \times Adding to this we also noted, that in the case of the $10 \times 10 \times
10$--grid the regularity was always $0$, as a non--contributing control-point 10$--grid the *regularity* was always $0$, as a non--contributing control--point
yields a $0$--column in the deformation--matrix, thus letting yields a $0$--column in the deformation--matrix, thus letting
$\sigma_\mathrm{min} = 0$. A better definition for regularity (i.e. using the $\sigma_\mathrm{min} = 0$. A better definition for *regularity* (i.e. using the
smallest non--zero right singular value) could solve this particular issue, but smallest non--zero right singular value) could solve this particular issue, but
not fix the trend we noticed above. not fix the trend we noticed above.
@ -1295,8 +1297,8 @@ $4 \times 4 \times 4$ & $4 \times 4 \times 5$ & $4 \times 4 \times 7$ & $4 \time
\cline{2-4} \cline{2-4}
\multicolumn{3}{c}{} & all: 0.95 (0) \T \multicolumn{3}{c}{} & all: 0.95 (0) \T
\end{tabular} \end{tabular}
\caption[Correlation between improvement--potential and fitting--error for 3D]{Correlation \caption[Correlation between *improvement potential* and fitting--error for 3D]{Correlation
between improvement--potential and fitting--error for the 3D fitting scenario. between *improvement potential* and fitting--error for the 3D fitting scenario.
Displayed are the negated Spearman coefficients with the corresponding p--values Displayed are the negated Spearman coefficients with the corresponding p--values
in brackets for various given grids ($\mathrm{X} \in [4,5,7], \mathrm{Y} \in [4,5,6]$). in brackets for various given grids ($\mathrm{X} \in [4,5,7], \mathrm{Y} \in [4,5,6]$).
\newline Note: Not significant results are marked in \textcolor{red}{red}.} \newline Note: Not significant results are marked in \textcolor{red}{red}.}
@ -1314,20 +1316,20 @@ quality of such gradients anyway.
\centering \centering
\includegraphics[width=\textwidth]{img/evolution3d/improvement_montage.png} \includegraphics[width=\textwidth]{img/evolution3d/improvement_montage.png}
\caption[Improvement potential for different 3D--grids]{ \caption[Improvement potential for different 3D--grids]{
Plots of improvement potential against error given by our fitness--function Plots of *improvement potential* against error given by our *fitness--function*
after convergence together with a linear fit of each of the plotted data to after convergence together with a linear fit of each of the plotted data to
indicate trends.} indicate trends.}
\label{fig:resimp3d} \label{fig:resimp3d}
\end{figure} \end{figure}
We plotted our findings on the improvement potential in a similar way as we did We plotted our findings on the *improvement potential* in a similar way as we did
before with the regularity. In figure \ref{fig:resimp3d} one can clearly see the before with the *regularity*. In figure \ref{fig:resimp3d} one can clearly see the
correlation and the spread within each setup and the behaviour when we increase correlation and the spread within each setup and the behaviour when we increase
the number of control--points. the number of control--points.
Along with this we also give the Spearman--coefficients along with their Along with this we also give the Spearman--coefficients along with their
p--values in table \ref{tab:3dimp}. Within one scenario we only find a *weak* to p--values in table \ref{tab:3dimp}. Within one scenario we only find a *weak* to
*moderate* correlation between the improvement potential and the fitting error, *moderate* correlation between the *improvement potential* and the fitting error,
but all findings (except for $7 \times 4 \times 4$ and $6 \times 6 \times 6$) but all findings (except for $7 \times 4 \times 4$ and $6 \times 6 \times 6$)
are significant. are significant.
@ -1335,14 +1337,14 @@ If we take multiple datasets into account the correlation is *very strong* and
*significant*, which is good, as this functions as a litmus--test, because the *significant*, which is good, as this functions as a litmus--test, because the
quality is naturally tied to the number of control--points. quality is naturally tied to the number of control--points.
All in all the improvement potential seems to be a good and sensible measure of All in all the *improvement potential* seems to be a good and sensible measure of
quality, even given gradients of varying quality. quality, even given gradients of varying quality.
Lastly, a small note on the behaviour of improvement potential and convergence Lastly, a small note on the behaviour of *improvement potential* and convergence
speed, as we used this in the 1D case to argue, why the *regularity* defied our speed, as we used this in the 1D case to argue, why the *regularity* defied our
expectations. As a contrast we wanted to show, that improvement potential cannot expectations. As a contrast we wanted to show, that *improvement potential* cannot
serve for good predictions of the convergence speed. In figure serve for good predictions of the convergence speed. In figure
\ref{fig:imp1d3d} we show improvement potential against number of iterations \ref{fig:imp1d3d} we show *improvement potential* against number of iterations
for both scenarios. As one can see, in the 1D scenario we have a *strong* for both scenarios. As one can see, in the 1D scenario we have a *strong*
and *significant* correlation (with $-r_S = -0.72$, $p = 0$), whereas in the 3D and *significant* correlation (with $-r_S = -0.72$, $p = 0$), whereas in the 3D
scenario we have the opposite *significant* and *strong* effect (with scenario we have the opposite *significant* and *strong* effect (with
@ -1352,11 +1354,11 @@ scenario and are not suited for generalization.
\begin{figure}[hbt] \begin{figure}[hbt]
\centering \centering
\includegraphics[width=\textwidth]{img/imp1d3d.png} \includegraphics[width=\textwidth]{img/imp1d3d.png}
\caption[Improvement potential and convergence speed for 1D and 3D--scenarios]{ \caption[Improvement potential and convergence speed\newline for 1D and 3D--scenarios]{
\newline \newline
Left: Improvement potential against convergence speed for the Left: *Improvement potential* against convergence speed for the
1D--scenario\newline 1D--scenario\newline
Right: Improvement potential against convergence speed for the 3D--scenario Right: *Improvement potential* against convergence speed for the 3D--scenario
} }
\label{fig:imp1d3d} \label{fig:imp1d3d}
\end{figure} \end{figure}
@ -1364,23 +1366,23 @@ Right: Improvement potential against convergence speed for the 3D--scnario
# Discussion and outlook # Discussion and outlook
\label{sec:dis} \label{sec:dis}
In this thesis we took a look at the different criteria for evolvability as In this thesis we took a look at the different criteria for *evolvability* as
introduced by Richter et al.\cite{anrichterEvol}, namely *variability*, introduced by Richter et al.\cite{anrichterEvol}, namely *variability*,
*regularity* and *improvement potential* under different setup--conditions. *regularity* and *improvement potential* under different setup--conditions.
Where Richter et al. used \acf{RBF}, we employed \acf{FFD} to set up a Where Richter et al. used \acf{RBF}, we employed \acf{FFD} to set up a
low--complexity parametrization of a more complex vertex--mesh. low--complexity parametrization of a more complex vertex--mesh.
In our findings we could show in the 1D--scenario, that there were statistically In our findings we could show in the 1D--scenario, that there were statistically
significant very strong correlations between *variability and fitting error* *significant* *very strong* correlations between *variability and fitting error*
($0.94$) and *improvement--potential and fitting error* ($1.0$) with ($0.94$) and *improvement potential and fitting error* ($1.0$) with
comparable results to those of Richter et al. (with $0.31$ to $0.88$ comparable results to those of Richter et al. (with $0.31$ to $0.88$
for the former and $0.75$ to $0.99$ for the latter), whereas we found for the former and $0.75$ to $0.99$ for the latter), whereas we found
only weak correlations for *regularity and convergence--speed* ($0.28$) only *weak* correlations for *regularity and convergence--speed* ($0.28$)
opposed to Richter et al. with $0.39$ to $0.91$.^[We only took statistically opposed to Richter et al. with $0.39$ to $0.91$.^[We only took statistically
*significant* results into consideration when compiling these numbers. Details *significant* results into consideration when compiling these numbers. Details
are given in the respective chapters.] are given in the respective chapters.]
For the 3D--scenario our results show a very strong, significant correlation For the 3D--scenario our results show a *very strong*, *significant* correlation
between *variability and fitting error* with $0.89$ to $0.94$, which are pretty between *variability and fitting error* with $0.89$ to $0.94$, which are pretty
much in line with the findings of Richter et al. ($0.65$ to $0.95$). The much in line with the findings of Richter et al. ($0.65$ to $0.95$). The
correlation between *improvement potential and fitting error* behaves similarly, correlation between *improvement potential and fitting error* behaves similarly,
@ -1410,10 +1412,7 @@ in \cite{anrichterEvol}, whereas we merely used an indirect \ac{FFD}--approach.
As direct manipulations tend to perform better than indirect manipulations, the As direct manipulations tend to perform better than indirect manipulations, the
usage of \acf{DM--FFD} could also work better with the criteria we examined. usage of \acf{DM--FFD} could also work better with the criteria we examined.
This can also solve the problem of bad singular values for the *regularity* as This can also solve the problem of bad singular values for the *regularity* as
the incorporation of the parametrization of the points on the surface, which are the incorporation of the parametrization of the points on the surface --- which
the essential part of a direct--manipulation, could cancel out a bad are the essential part of a direct--manipulation --- could cancel out a bad
control--grid as the bad control--points are never or negligibly used to control--grid as the bad control--points are never or negligibly used to
parametrize those surface--points. parametrize those surface--points.
\improvement[inline]{Bibliotheksverzeichnis links anpassen. DOI überschreibt
Direktlinks des Autors.}

Binary file not shown.

View File

@ -197,20 +197,20 @@ the translation of the problem--domain into a simple parametric
representation (the \emph{genome}) can be challenging. representation (the \emph{genome}) can be challenging.
This translation is often necessary as the target of the optimization This translation is often necessary as the target of the optimization
may have too many degrees of freedom. In the example of an aerodynamic may have too many degrees of freedom for a reasonable computation. In
simulation of drag onto an object, those object--designs tend to have a the example of an aerodynamic simulation of drag onto an object, those
high number of vertices to adhere to various requirements (visual, object--designs tend to have a high number of vertices to adhere to
practical, physical, etc.). A simpler representation of the same object various requirements (visual, practical, physical, etc.). A simpler
in only a few parameters that manipulate the whole in a sensible matter representation of the same object in only a few parameters that
are desirable, as this often decreases the computation time manipulate the whole in a sensible matter are desirable, as this often
significantly. decreases the computation time significantly.
Additionally one can exploit the fact, that drag in this case is Additionally one can exploit the fact, that drag in this case is
especially sensitive to non--smooth surfaces, so that a smooth local especially sensitive to non--smooth surfaces, so that a smooth local
manipulation of the surface as a whole is more advantageous than merely manipulation of the surface as a whole is more advantageous than merely
random manipulation of the vertices. random manipulation of the vertices.
The quality of such a low-dimensional representation in biological The quality of such a low--dimensional representation in biological
evolution is strongly tied to the notion of evolution is strongly tied to the notion of
\emph{evolvability}\cite{wagner1996complex}, as the parametrization of \emph{evolvability}\cite{wagner1996complex}, as the parametrization of
the problem has serious implications on the convergence speed and the the problem has serious implications on the convergence speed and the
@ -230,7 +230,7 @@ One example of such a general representation of an object is to generate
random points and represent vertices of an object as distances to these random points and represent vertices of an object as distances to these
points --- for example via \acf{RBF}. If one (or the algorithm) would points --- for example via \acf{RBF}. If one (or the algorithm) would
move such a point the object will get deformed only locally (due to the move such a point the object will get deformed only locally (due to the
\ac{RBF}). As this results in a simple mapping from the parameter-space \ac{RBF}). As this results in a simple mapping from the parameter--space
onto the object one can try out different representations of the same onto the object one can try out different representations of the same
object and evaluate which criteria may be suited to describe this notion object and evaluate which criteria may be suited to describe this notion
of \emph{evolvability}. This is exactly what Richter et of \emph{evolvability}. This is exactly what Richter et
@ -238,18 +238,19 @@ al.\cite{anrichterEvol} have done.
As we transfer the results of Richter et al.\cite{anrichterEvol} from As we transfer the results of Richter et al.\cite{anrichterEvol} from
using \acf{RBF} as a representation to manipulate geometric objects to using \acf{RBF} as a representation to manipulate geometric objects to
the use of \acf{FFD} we will use the same definition for evolvability the use of \acf{FFD} we will use the same definition for
the original author used, namely \emph{regularity}, \emph{variability}, \emph{evolvability} the original author used, namely \emph{regularity},
and \emph{improvement potential}. We introduce these terms in detail in \emph{variability}, and \emph{improvement potential}. We introduce these
Chapter \ref{sec:intro:rvi}. In the original publication the author terms in detail in Chapter \ref{sec:intro:rvi}. In the original
could show a correlation between these evolvability--criteria with the publication the author could show a correlation between these
quality and convergence speed of such optimization. evolvability--criteria with the quality and convergence speed of such
optimization.
We will replicate the same setup on the same objects but use \acf{FFD} We will replicate the same setup on the same objects but use \acf{FFD}
instead of \acf{RBF} to create a local deformation near the control instead of \acf{RBF} to create a local deformation near the
points and evaluate if the evolution--criteria still work as a predictor control--points and evaluate if the evolution--criteria still work as a
for \emph{evolvability} of the representation given the different predictor for \emph{evolvability} of the representation given the
deformation scheme, as suspected in \cite{anrichterEvol}. different deformation scheme, as suspected in \cite{anrichterEvol}.
First we introduce different topics in isolation in Chapter First we introduce different topics in isolation in Chapter
\ref{sec:back}. We take an abstract look at the definition of \ac{FFD} \ref{sec:back}. We take an abstract look at the definition of \ac{FFD}
@ -258,7 +259,7 @@ is a sensible deformation function (in \ref{sec:back:ffdgood}). Then we
establish some background--knowledge of evolutionary algorithms (in establish some background--knowledge of evolutionary algorithms (in
\ref{sec:back:evo}) and why this is useful in our domain (in \ref{sec:back:evo}) and why this is useful in our domain (in
\ref{sec:back:evogood}) followed by the definition of the different \ref{sec:back:evogood}) followed by the definition of the different
evolvability criteria established in \cite{anrichterEvol} (in evolvability--criteria established in \cite{anrichterEvol} (in
\ref {sec:intro:rvi}). \ref {sec:intro:rvi}).
In Chapter \ref{sec:impl} we take a look at our implementation of In Chapter \ref{sec:impl} we take a look at our implementation of
@ -285,18 +286,19 @@ from \cite{spitzmuller1996bezier} here and go into the extension to the
The main idea of \ac{FFD} is to create a function The main idea of \ac{FFD} is to create a function
\(s : [0,1[^d \mapsto \mathbb{R}^d\) that spans a certain part of a \(s : [0,1[^d \mapsto \mathbb{R}^d\) that spans a certain part of a
vector--space and is only linearly parametrized by some special control vector--space and is only linearly parametrized by some special
points \(p_i\) and a constant attribution--function \(a_i(u)\), so \[ control--points \(p_i\) and a constant attribution--function
\(a_i(u)\), so \[
s(\vec{u}) = \sum_i a_i(\vec{u}) \vec{p_i} s(\vec{u}) = \sum_i a_i(\vec{u}) \vec{p_i}
\] can be thought of a representation of the inside of the convex hull \] can be thought of a representation of the inside of the convex hull
generated by the control points where each point can be accessed by the generated by the control--points where each position inside can be
right \(u \in [0,1[^d\). accessed by the right \(u \in [0,1[^d\).
\begin{figure}[!ht] \begin{figure}[!ht]
\begin{center} \begin{center}
\includegraphics[width=0.7\textwidth]{img/B-Splines.png} \includegraphics[width=0.7\textwidth]{img/B-Splines.png}
\end{center} \end{center}
\caption[Example of B-Splines]{Example of a parametrization of a line with \caption[Example of B--Splines]{Example of a parametrization of a line with
corresponding deformation to generate a deformed object} corresponding deformation to generate a deformed object}
\label{fig:bspline} \label{fig:bspline}
\end{figure} \end{figure}
@ -338,7 +340,8 @@ We can even derive this equation straightforward for an arbitrary
\[\frac{\partial}{\partial u} N_{i,d,r}(u) = \frac{d}{\tau_{i+d} - \tau_i} N_{i,d-1,\tau}(u) - \frac{d}{\tau_{i+d+1} - \tau_{i+1}} N_{i+1,d-1,\tau}(u)\] \[\frac{\partial}{\partial u} N_{i,d,r}(u) = \frac{d}{\tau_{i+d} - \tau_i} N_{i,d-1,\tau}(u) - \frac{d}{\tau_{i+d+1} - \tau_{i+1}} N_{i+1,d-1,\tau}(u)\]
For a B--Spline \[s(u) = \sum_{i} N_{i,d,\tau_i}(u) p_i\] these For a B--Spline \[s(u) = \sum_{i} N_{i,d,\tau_i}(u) p_i\] these
derivations yield \(\frac{\partial^d}{\partial u} s(u) = 0\). derivations yield
\(\left(\frac{\partial}{\partial u}\right)^d s(u) = 0\).
Another interesting property of these recursive polynomials is that they Another interesting property of these recursive polynomials is that they
are continuous (given \(d \ge 1\)) as every \(p_i\) gets blended in are continuous (given \(d \ge 1\)) as every \(p_i\) gets blended in
@ -348,17 +351,17 @@ step of the recursion.
This means that all changes are only a local linear combination between This means that all changes are only a local linear combination between
the control--point \(p_i\) to \(p_{i+d+1}\) and consequently this yields the control--point \(p_i\) to \(p_{i+d+1}\) and consequently this yields
to the convex--hull--property of B-Splines --- meaning, that no matter to the convex--hull--property of B--Splines --- meaning, that no matter
how we choose our coefficients, the resulting points all have to lie how we choose our coefficients, the resulting points all have to lie
inside convex--hull of the control--points. inside convex--hull of the control--points.
For a given point \(v_i\) we can then calculate the contributions For a given point \(s_i\) we can then calculate the contributions
\(n_{i,j}~:=~N_{j,d,\tau}\) of each control point \(p_j\) to get the \(u_{i,j}~:=~N_{j,d,\tau}\) of each control point \(p_j\) to get the
projection from the control--point--space into the object--space: \[ projection from the control--point--space into the object--space: \[
v_i = \sum_j n_{i,j} \cdot p_j = \vec{n}_i^{T} \vec{p} s_i = \sum_j u_{i,j} \cdot p_j = \vec{n}_i^{T} \vec{p}
\] or written for all points at the same time: \[ \] or written for all points at the same time: \[
\vec{v} = \vec{N} \vec{p} \vec{s} = \vec{U} \vec{p}
\] where \(\vec{N}\) is the \(n \times m\) transformation--matrix (later \] where \(\vec{U}\) is the \(n \times m\) transformation--matrix (later
on called \textbf{deformation matrix}) for \(n\) object--space--points on called \textbf{deformation matrix}) for \(n\) object--space--points
and \(m\) control--points. and \(m\) control--points.
@ -372,7 +375,7 @@ of the B--spline ($[k_0,k_4]$ on this figure), the B--Spline basis functions sum
up to one (partition of unity). In this example, we use B--Splines of degree 2. up to one (partition of unity). In this example, we use B--Splines of degree 2.
The horizontal segment below the abscissa axis represents the domain of The horizontal segment below the abscissa axis represents the domain of
influence of the B--splines basis function, i.e. the interval on which they are influence of the B--splines basis function, i.e. the interval on which they are
not null. At a given point, there are at most $ d+1$ non-zero B--Spline basis not null. At a given point, there are at most $ d+1$ non--zero B--Spline basis
functions (compact support).\grqq \newline functions (compact support).\grqq \newline
Note, that Brunet starts his index at $-d$ opposed to our definition, where we Note, that Brunet starts his index at $-d$ opposed to our definition, where we
start at $0$.} start at $0$.}
@ -381,8 +384,8 @@ start at $0$.}
Furthermore B--Spline--basis--functions form a partition of unity for Furthermore B--Spline--basis--functions form a partition of unity for
all, but the first and last \(d\) all, but the first and last \(d\)
control-points\cite{brunet2010contributions}. Therefore we later on use control--points\cite{brunet2010contributions}. Therefore we later on use
the border-points \(d+1\) times, such that \(\sum_j n_{i,j} p_j = p_i\) the border--points \(d+1\) times, such that \(\sum_j u_{i,j} p_j = p_i\)
for these points. for these points.
The locality of the influence of each control--point and the partition The locality of the influence of each control--point and the partition
@ -395,13 +398,13 @@ function?}{Why is a good deformation function?}}\label{why-is-a-good-deformatio
\label{sec:back:ffdgood} \label{sec:back:ffdgood}
The usage of \ac{FFD} as a tool for manipulating follows directly from The usage of \ac{FFD} as a tool for manipulating follows directly from
the properties of the polynomials and the correspondence to the control the properties of the polynomials and the correspondence to the
points. Having only a few control points gives the user a nicer control--points. Having only a few control--points gives the user a
high--level--interface, as she only needs to move these points and the nicer high--level--interface, as she only needs to move these points and
model follows in an intuitive manner. The deformation is smooth as the the model follows in an intuitive manner. The deformation is smooth as
underlying polygon is smooth as well and affects as many vertices of the the underlying polygon is smooth as well and affects as many vertices of
model as needed. Moreover the changes are always local so one does not the model as needed. Moreover the changes are always local so one does
risk any change that a user cannot immediately see. not risk any change that a user cannot immediately see.
But there are also disadvantages of this approach. The user loses the But there are also disadvantages of this approach. The user loses the
ability to directly influence vertices and even seemingly simple tasks ability to directly influence vertices and even seemingly simple tasks
@ -479,7 +482,7 @@ through our \emph{fitness--function}, biologically by the ability to
survive and produce offspring). Any individual in our algorithm thus survive and produce offspring). Any individual in our algorithm thus
experiences a biologically motivated life cycle of inheriting genes from experiences a biologically motivated life cycle of inheriting genes from
the parents, modified by mutations occurring, performing according to a the parents, modified by mutations occurring, performing according to a
fitness--metric and generating offspring based on this. Therefore each fitness--metric, and generating offspring based on this. Therefore each
iteration in the while--loop above is also often named generation. iteration in the while--loop above is also often named generation.
One should note that there is a subtle difference between One should note that there is a subtle difference between
@ -517,8 +520,8 @@ The main algorithm just repeats the following steps:
\(s : (I^\lambda \cup I^{\mu + \lambda},\Phi) \mapsto I^\mu\) that \(s : (I^\lambda \cup I^{\mu + \lambda},\Phi) \mapsto I^\mu\) that
selects from the previously generated \(I^\lambda\) children and selects from the previously generated \(I^\lambda\) children and
optionally also the parents (denoted by the set \(Q\) in the optionally also the parents (denoted by the set \(Q\) in the
algorithm) using the fitness--function \(\Phi\). The result of this algorithm) using the \emph{fitness--function} \(\Phi\). The result of
operation is the next population of \(\mu\) individuals. this operation is the next population of \(\mu\) individuals.
\end{itemize} \end{itemize}
All these functions can (and mostly do) have a lot of hidden parameters All these functions can (and mostly do) have a lot of hidden parameters
@ -547,10 +550,10 @@ algorithms}\label{advantages-of-evolutionary-algorithms}
The main advantage of evolutionary algorithms is the ability to find The main advantage of evolutionary algorithms is the ability to find
optima of general functions just with the help of a given optima of general functions just with the help of a given
fitness--function. Components and techniques for evolutionary algorithms \emph{fitness--function}. Components and techniques for evolutionary
are specifically known to help with different problems arising in the algorithms are specifically known to help with different problems
domain of optimization\cite{weise2012evolutionary}. An overview of the arising in the domain of optimization\cite{weise2012evolutionary}. An
typical problems is shown in figure \ref{fig:probhard}. overview of the typical problems is shown in figure \ref{fig:probhard}.
\begin{figure}[!ht] \begin{figure}[!ht]
\includegraphics[width=\textwidth]{img/weise_fig3.png} \includegraphics[width=\textwidth]{img/weise_fig3.png}
@ -559,13 +562,14 @@ typical problems are shown in figure \ref{fig:probhard}.
\end{figure} \end{figure}
Most of the advantages stem from the fact that a gradient--based Most of the advantages stem from the fact that a gradient--based
procedure has only one point of observation from where it evaluates the procedure has usually only one point of observation from where it
next steps, whereas an evolutionary strategy starts with a population of evaluates the next steps, whereas an evolutionary strategy starts with a
guessed solutions. Because an evolutionary strategy can be modified population of guessed solutions. Because an evolutionary strategy can be
according to the problem--domain (i.e.~by the ideas given above) it can modified according to the problem--domain (i.e.~by the ideas given
also approximate very difficult problems in an efficient manner and even above) it can also approximate very difficult problems in an efficient
self--tune parameters depending on the ancestry at runtime\footnote{Some manner and even self--tune parameters depending on the ancestry at
examples of this are explained in detail in \cite{eiben1999parameter}}. runtime\footnote{Some examples of this are explained in detail in
\cite{eiben1999parameter}}.
If an analytic best solution exists and is easily computable If an analytic best solution exists and is easily computable
(i.e.~because the error--function is convex) an evolutionary algorithm (i.e.~because the error--function is convex) an evolutionary algorithm
@ -599,8 +603,8 @@ deformation.
We can also think of the deformation in terms of differences from the We can also think of the deformation in terms of differences from the
original coordinates \[ original coordinates \[
\Delta \vec{S} = \vec{U} \cdot \Delta \vec{P} \Delta \vec{S} = \vec{U} \cdot \Delta \vec{P}
\] which is isomorphic to the former due to the linear correlation in \] which is isomorphic to the former due to the linearity of the
the deformation. One can see in this way, that the way the deformation deformation. One can see in this way, that the way the deformation
behaves lies solely in the entries of \(\vec{U}\), which is why the behaves lies solely in the entries of \(\vec{U}\), which is why the
three criteria focus on this. three criteria focus on this.
@ -609,14 +613,14 @@ three criteria focus on this.
In \cite{anrichterEvol} \emph{variability} is defined as In \cite{anrichterEvol} \emph{variability} is defined as
\[\mathrm{variability}(\vec{U}) := \frac{\mathrm{rank}(\vec{U})}{n},\] \[\mathrm{variability}(\vec{U}) := \frac{\mathrm{rank}(\vec{U})}{n},\]
whereby \(\vec{U}\) is the \(n \times m\) deformation--Matrix used to whereby \(\vec{U}\) is the \(n \times m\) deformation--Matrix used to
map the \(m\) control points onto the \(n\) vertices. map the \(m\) control--points onto the \(n\) vertices.
Given \(n = m\), an identical number of control--points and vertices, Given \(n = m\), an identical number of control--points and vertices,
this quotient will be \(=1\) if all control points are independent of this quotient will be \(=1\) if all control--points are independent of
each other and the solution is to trivially move every control--point each other and the solution is to trivially move every control--point
onto a target--point. onto a target--point.
In practice the value of \(V(\vec{U})\) is typically \(\ll 1\), because as In practice the value of \(V(\vec{U})\) is typically \(\ll 1\), because
there are only few control--points for many vertices, so \(m \ll n\). there are only few control--points for many vertices, so \(m \ll n\).
This criterion should correlate to the degrees of freedom the given This criterion should correlate to the degrees of freedom the given
@ -641,7 +645,7 @@ As we deform the given Object only based on the parameters as
\(\vec{p} \mapsto f(\vec{x} + \vec{U}\vec{p})\) this makes sure that \(\vec{p} \mapsto f(\vec{x} + \vec{U}\vec{p})\) this makes sure that
\(\|\vec{Up}\| \propto \|\vec{p}\|\) when \(\kappa(\vec{U}) \approx 1\). \(\|\vec{Up}\| \propto \|\vec{p}\|\) when \(\kappa(\vec{U}) \approx 1\).
The inversion of \(\kappa(\vec{U})\) is only performed to map the The inversion of \(\kappa(\vec{U})\) is only performed to map the
criterion--range to \([0..1]\), whereas \(1\) is the optimal value and criterion--range to \([0..1]\), where \(1\) is the optimal value and
\(0\) is the worst value. \(0\) is the worst value.
On the one hand this criterion should be characteristic for numeric On the one hand this criterion should be characteristic for numeric
@ -653,8 +657,8 @@ locality\cite{weise2012evolutionary,thorhauer2014locality}.
\subsection{Improvement Potential}\label{improvement-potential} \subsection{Improvement Potential}\label{improvement-potential}
In contrast to the general nature of \emph{variability} and In contrast to the general nature of \emph{variability} and
\emph{regularity}, which are agnostic of the fitness--function at hand, \emph{regularity}, which are agnostic of the \emph{fitness--function} at
the third criterion should reflect a notion of the potential for hand, the third criterion should reflect a notion of the potential for
optimization, taking a guess into account. optimization, taking a guess into account.
Most of the times some kind of gradient \(g\) is available to suggest a Most of the times some kind of gradient \(g\) is available to suggest a
@ -698,9 +702,9 @@ As we have established in Chapter \ref{sec:back:ffd} we can define an
\Delta_x(u) = \sum_i N_{i,d,\tau_i}(u) \Delta_x c_i \Delta_x(u) = \sum_i N_{i,d,\tau_i}(u) \Delta_x c_i
\end{equation} \end{equation}
Note that we only sum up the \(\Delta\)--displacements in the control Note that we only sum up the \(\Delta\)--displacements in the
points \(c_i\) to get the change in position of the point we are control--points \(c_i\) to get the change in position of the point we
interested in. are interested in.
In this way every deformed vertex is defined by \[ In this way every deformed vertex is defined by \[
\textrm{Deform}(v_x) = v_x + \Delta_x(u) \textrm{Deform}(v_x) = v_x + \Delta_x(u)
@ -722,8 +726,8 @@ v_x \overset{!}{=} \sum_i N_{i,d,\tau_i}(u) c_i
For this we employ the Gauss--Newton algorithm\cite{gaussNewton}, which For this we employ the Gauss--Newton algorithm\cite{gaussNewton}, which
converges into the least--squares solution. An exact solution of this converges into the least--squares solution. An exact solution of this
problem is impossible most of the times, because we usually have way problem is impossible most of the time, because we usually have way more
more vertices than control points (\(\#v~\gg~\#c\)). vertices than control--points (\(\#v~\gg~\#c\)).
\section{\texorpdfstring{Adaption of \ac{FFD} for a \section{\texorpdfstring{Adaption of \ac{FFD} for a
3D--Mesh}{Adaption of for a 3D--Mesh}}\label{adaption-of-for-a-3dmesh} 3D--Mesh}{Adaption of for a 3D--Mesh}}\label{adaption-of-for-a-3dmesh}
@ -735,8 +739,8 @@ last chapter. But this time things get a bit more complicated. As we
have a 3--dimensional grid we may have a different amount of have a 3--dimensional grid we may have a different amount of
control--points in each direction. control--points in each direction.
Given \(n,m,o\) control points in \(x,y,z\)--direction each point on the Given \(n,m,o\) control--points in \(x,y,z\)--direction each point on
curve is defined by the curve is defined by
\[V(u,v,w) = \sum_i \sum_j \sum_k N_{i,d,\tau_i}(u) N_{j,d,\tau_j}(v) N_{k,d,\tau_k}(w) \cdot C_{ijk}.\] \[V(u,v,w) = \sum_i \sum_j \sum_k N_{i,d,\tau_i}(u) N_{j,d,\tau_j}(v) N_{k,d,\tau_k}(w) \cdot C_{ijk}.\]
In this case we have three different B--Splines (one for each dimension) In this case we have three different B--Splines (one for each dimension)
@ -814,14 +818,14 @@ behaviour of the evolutionary algorithm.
As mentioned in chapter \ref{sec:back:evo}, the way of choosing the As mentioned in chapter \ref{sec:back:evo}, the way of choosing the
representation to map the general problem (mesh--fitting/optimization in representation to map the general problem (mesh--fitting/optimization in
our case) into a parameter-space is very important for the quality and our case) into a parameter--space is very important for the quality and
runtime of evolutionary algorithms\cite{Rothlauf2006}. runtime of evolutionary algorithms\cite{Rothlauf2006}.
Because our control--points are arranged in a grid, we can accurately Because our control--points are arranged in a grid, we can accurately
represent each vertex--point inside the grids volume with proper represent each vertex--point inside the grids volume with proper
B--Spline--coefficients between \([0,1[\) and --- as a consequence --- B--Spline--coefficients between \([0,1[\) and --- as a consequence ---
we have to embed our object into it (or create constant ``dummy''-points we have to embed our object into it (or create constant
outside). ``dummy''--points outside).
The great advantage of B--Splines is the local, direct impact of each The great advantage of B--Splines is the local, direct impact of each
control point without having a \(1:1\)--correlation, and a smooth control--point without having a \(1:1\)--correlation, and a smooth
@ -844,18 +848,18 @@ One would normally think, that the more control--points you add, the
better the result will be, but this is not the case for our B--Splines. better the result will be, but this is not the case for our B--Splines.
Given any point \(\vec{p}\) only the \(2 \cdot (d-1)\) control--points Given any point \(\vec{p}\) only the \(2 \cdot (d-1)\) control--points
contribute to the parametrization of that point\footnote{Normally these contribute to the parametrization of that point\footnote{Normally these
are \(d-1\) to each side, but at the boundaries the number gets are \(d-1\) to each side, but at the boundaries border points get used
increased to the inside to meet the required smoothness}. This means, multiple times to meet the number of points required}. This means,
that a high resolution can have many control-points that are not that a high resolution can have many control--points that are not
contributing to any point on the surface and are thus completely contributing to any point on the surface and are thus completely
irrelevant to the solution. irrelevant to the solution.
We illustrate this phenomenon in figure \ref{fig:enoughCP}, where the We illustrate this phenomenon in figure \ref{fig:enoughCP}, where the
four red central points are not relevant for the parametrization of the red central points are not relevant for the parametrization of the
circle. This leads to artefacts in the deformation--matrix \(\vec{U}\), circle. This leads to artefacts in the deformation--matrix \(\vec{U}\),
as the columns corresponding to those control--points are \(0\). as the columns corresponding to those control--points are \(0\).
This leads to useless increased complexity, as the parameters This also leads to useless increased complexity, as the parameters
corresponding to those points will never have any effect, but a naive corresponding to those points will never have any effect, but a naive
algorithm will still try to optimize them yielding numeric artefacts in algorithm will still try to optimize them yielding numeric artefacts in
the best and non--terminating or ill--defined solutions\footnote{One the best and non--terminating or ill--defined solutions\footnote{One
@ -869,22 +873,23 @@ in the first place. We will address this in a special scenario in
\ref{sec:res:3d:var}. \ref{sec:res:3d:var}.
For our tests we chose different uniformly sized grids and added noise For our tests we chose different uniformly sized grids and added noise
onto each control-point\footnote{For the special case of the outer layer onto each control--point\footnote{For the special case of the outer
we only applied noise away from the object, so the object is still layer we only applied noise away from the object, so the object is
confined in the convex hull of the control--points.} to simulate still confined in the convex hull of the control--points.} to simulate
different starting-conditions. different starting--conditions.
\chapter{\texorpdfstring{Scenarios for testing evolvability criteria \chapter{\texorpdfstring{Scenarios for testing evolvability--criteria
using using
\ac{FFD}}{Scenarios for testing evolvability criteria using }}\label{scenarios-for-testing-evolvability-criteria-using} \ac{FFD}}{Scenarios for testing evolvability--criteria using }}\label{scenarios-for-testing-evolvabilitycriteria-using}
\label{sec:eval} \label{sec:eval}
In our experiments we use the same two testing--scenarios, that were In our experiments we use the same two testing--scenarios, that were
also used by \cite{anrichterEvol}. The first scenario deforms a plane also used by Richter et al.\cite{anrichterEvol} The first scenario
into a shape originally defined in \cite{giannelli2012thb}, where we deforms a plane into a shape originally defined by Giannelli et
setup control-points in a 2--dimensional manner and merely deform in the al.\cite{giannelli2012thb}, where we setup control--points in a
height--coordinate to get the resulting shape. 2--dimensional manner and merely deform in the height--coordinate to get
the resulting shape.
In the second scenario we increase the degrees of freedom significantly In the second scenario we increase the degrees of freedom significantly
by using a 3--dimensional control--grid to deform a sphere into a face, by using a 3--dimensional control--grid to deform a sphere into a face,
@ -922,7 +927,7 @@ including a wireframe--overlay of the vertices.}
\label{fig:1dtarget} \label{fig:1dtarget}
\end{figure} \end{figure}
As the starting-plane we used the same shape, but set all As the starting--plane we used the same shape, but set all
\(z\)--coordinates to \(0\), yielding a flat plane, which is partially \(z\)--coordinates to \(0\), yielding a flat plane, which is partially
already correct. already correct.
@ -936,11 +941,11 @@ corresponding vertex
where \(t_i\) are the respective target--vertices to the parametrized where \(t_i\) are the respective target--vertices to the parametrized
source--vertices\footnote{The parametrization is encoded in \(\vec{U}\) source--vertices\footnote{The parametrization is encoded in \(\vec{U}\)
and the initial position of the control points. See and the initial position of the control--points. See
\ref{sec:ffd:adapt}} with the current deformation--parameters \ref{sec:ffd:adapt}} with the current deformation--parameters
\(\vec{p} = (p_1,\dots, p_m)\). We can do this \(\vec{p} = (p_1,\dots, p_m)\). We can do this
one--to--one--correspondence because we have exactly the same number of one--to--one--correspondence because we have exactly the same number of
source and target-vertices due to our setup of just flattening the source and target--vertices due to our setup of just flattening the
object. object.
This formula is also the least--squares approximation error for which we This formula is also the least--squares approximation error for which we
@ -975,16 +980,16 @@ Both of these Models can be seen in figure \ref{fig:3dtarget}.
Opposed to the 1D--case we cannot map the source and target--vertices in Opposed to the 1D--case we cannot map the source and target--vertices in
a one--to--one--correspondence, which we especially need for the a one--to--one--correspondence, which we especially need for the
approximation of the fitting--error. Hence we state that the error of approximation of the fitting--error. Hence we state that the error of
one vertex is the distance to the closest vertex of the other model and one vertex is the distance to the closest vertex of the respective other
sum up the error from the respective source and target. model and sum up the error from the source and target.
We therefore define the \emph{fitness--function} to be: We therefore define the \emph{fitness--function} to be:
\begin{equation} \begin{equation}
\mathrm{f}(\vec{P}) = \frac{1}{n} \underbrace{\sum_{i=1}^n \|\vec{c_T(s_i)} - \mathrm{f}(\vec{P}) = \frac{1}{n} \underbrace{\sum_{i=1}^n \|\vec{c_T(s_i)} -
\vec{s_i}\|_2^2}_{\textrm{source-to-target--distance}} \vec{s_i}\|_2^2}_{\textrm{source--to--target--distance}}
+ \frac{1}{m} \underbrace{\sum_{i=1}^m \|\vec{c_S(t_i)} - + \frac{1}{m} \underbrace{\sum_{i=1}^m \|\vec{c_S(t_i)} -
\vec{t_i}\|_2^2}_{\textrm{target-to-source--distance}} \vec{t_i}\|_2^2}_{\textrm{target--to--source--distance}}
+ \lambda \cdot \textrm{regularization}(\vec{P}) + \lambda \cdot \textrm{regularization}(\vec{P})
\label{eq:fit3d} \label{eq:fit3d}
\end{equation} \end{equation}
@ -1002,7 +1007,7 @@ calculated coefficients for the \ac{FFD} --- analog to the 1D case ---
and finally \(\vec{P}\) being the \(m \times 3\)--matrix of the and finally \(\vec{P}\) being the \(m \times 3\)--matrix of the
control--grid defining the whole deformation. control--grid defining the whole deformation.
As regularization-term we add a weighted Laplacian of the deformation As regularization--term we add a weighted Laplacian of the deformation
that has been used before by Aschenbach et that has been used before by Aschenbach et
al.\cite[Section 3.2]{aschenbach2015} on similar models and was shown to al.\cite[Section 3.2]{aschenbach2015} on similar models and was shown to
lead to a more precise fit. The Laplacian lead to a more precise fit. The Laplacian
@ -1034,7 +1039,7 @@ To compare our results to the ones given by Richter et
al.\cite{anrichterEvol}, we also use Spearman's rank correlation al.\cite{anrichterEvol}, we also use Spearman's rank correlation
coefficient. Opposed to other popular coefficients, like the Pearson coefficient. Opposed to other popular coefficients, like the Pearson
correlation coefficient, which measures a linear relationship between correlation coefficient, which measures a linear relationship between
variables, the Spearmans's coefficient assesses \glqq how well an variables, the Spearman's coefficient assesses \glqq how well an
arbitrary monotonic function can describe the relationship between two arbitrary monotonic function can describe the relationship between two
variables, without making any assumptions about the frequency variables, without making any assumptions about the frequency
distribution of the variables\grqq\cite{hauke2011comparison}. distribution of the variables\grqq\cite{hauke2011comparison}.
@ -1072,18 +1077,19 @@ Approximation}\label{procedure-1d-function-approximation}
\label{sec:proc:1d} \label{sec:proc:1d}
For our setup we first compute the coefficients of the For our setup we first compute the coefficients of the
deformation--matrix and use then the formulas for \emph{variability} and deformation--matrix and use the formulas for \emph{variability} and
\emph{regularity} to get our predictions. Afterwards we solve the \emph{regularity} to get our predictions. Afterwards we solve the
problem analytically to get the (normalized) correct gradient that we problem analytically to get the (normalized) correct gradient that we
use as guess for the \emph{improvement potential}. To check we also use as guess for the \emph{improvement potential}. To further test the
consider a distorted gradient \(\vec{g}_{\mathrm{d}}\) \[ \emph{improvement potential} we also consider a distorted gradient
\(\vec{g}_{\mathrm{d}}\): \[
\vec{g}_{\mathrm{d}} = \frac{\mu \vec{g}_{\mathrm{c}} + (1-\mu)\mathbb{1}}{\|\mu \vec{g}_{\mathrm{c}} + (1-\mu) \mathbb{1}\|} \vec{g}_{\mathrm{d}} = \frac{\mu \vec{g}_{\mathrm{c}} + (1-\mu)\mathbb{1}}{\|\mu \vec{g}_{\mathrm{c}} + (1-\mu) \mathbb{1}\|}
\] where \(\mathbb{1}\) is the vector consisting of \(1\) in every \] where \(\mathbb{1}\) is the vector consisting of \(1\) in every
dimension, \(\vec{g}_\mathrm{c} = \vec{p^{*}} - \vec{p}\) is the dimension, \(\vec{g}_\mathrm{c} = \vec{p^{*}} - \vec{p}\) is the
calculated correct gradient, and \(\mu\) is used to blend between calculated correct gradient, and \(\mu\) is used to blend between
\(\vec{g}_\mathrm{c}\) and \(\mathbb{1}\). As we always start with a \(\vec{g}_\mathrm{c}\) and \(\mathbb{1}\). As we always start with a
gradient of \(p = \mathbb{0}\) this means shortens gradient of \(p = \mathbb{0}\) this means we can shorten the definition
\(\vec{g}_\mathrm{c} = \vec{p^{*}}\). of \(\vec{g}_\mathrm{c}\) to \(\vec{g}_\mathrm{c} = \vec{p^{*}}\).
\begin{figure}[ht] \begin{figure}[ht]
\begin{center} \begin{center}
@ -1096,10 +1102,10 @@ random distortion to generate a testcase.}
We then set up a regular 2--dimensional grid around the object with the We then set up a regular 2--dimensional grid around the object with the
desired grid resolutions. To generate a testcase we then move the desired grid resolutions. To generate a testcase we then move the
grid--vertices randomly inside the x--y--plane. As self-intersecting grid--vertices randomly inside the x--y--plane. As self--intersecting
grids get tricky to solve with our implemented Newton's method we avoid grids get tricky to solve with our implemented Newton's method (see
the generation of such self--intersecting grids for our testcases (see section \ref{3dffd}) we avoid the generation of such self--intersecting
section \ref{3dffd}). grids for our testcases.
To achieve that we generated a Gaussian distributed number with To achieve that we generated a Gaussian distributed number with
\(\mu = 0, \sigma=0.25\) and clamped it to the range \([-0.25,0.25]\). \(\mu = 0, \sigma=0.25\) and clamped it to the range \([-0.25,0.25]\).
@ -1130,12 +1136,12 @@ In the case of our 1D--Optimization--problem, we have the luxury of
knowing the analytical solution to the given problem--set. We use this knowing the analytical solution to the given problem--set. We use this
to experimentally evaluate the quality criteria we introduced before. As to experimentally evaluate the quality criteria we introduced before. As
an evolutionary optimization is partially a random process, we use the an evolutionary optimization is partially a random process, we use the
analytical solution as a stopping-criterion. We measure the convergence analytical solution as a stopping--criterion. We measure the convergence
speed as number of iterations the evolutionary algorithm needed to get speed as number of iterations the evolutionary algorithm needed to get
within \(1.05 \times\) of the optimal solution. within \(1.05 \times\) of the optimal solution.
We used different regular grids that we manipulated as explained in We used different regular grids that we manipulated as explained in
Section \ref{sec:proc:1d} with a different number of control points. As Section \ref{sec:proc:1d} with a different number of control--points. As
our grids have to be the product of two integers, we compared a our grids have to be the product of two integers, we compared a
\(5 \times 5\)--grid with \(25\) control--points to a \(4 \times 7\) and \(5 \times 5\)--grid with \(25\) control--points to a \(4 \times 7\) and
\(7 \times 4\)--grid with \(28\) control--points. This was done to \(7 \times 4\)--grid with \(28\) control--points. This was done to
@ -1157,7 +1163,7 @@ Note that $7 \times 4$ and $4 \times 7$ have the same number of control--points.
\label{fig:1dvar} \label{fig:1dvar}
\end{figure} \end{figure}
Variability should characterize the potential for design space \emph{Variability} should characterize the potential for design space
exploration and is defined in terms of the normalized rank of the exploration and is defined in terms of the normalized rank of the
deformation matrix \(\vec{U}\): deformation matrix \(\vec{U}\):
\(V(\vec{U}) := \frac{\textrm{rank}(\vec{U})}{n}\), whereby \(n\) is the \(V(\vec{U}) := \frac{\textrm{rank}(\vec{U})}{n}\), whereby \(n\) is the
number of vertices. As all our tested matrices had a constant rank, we number of vertices. As all our tested matrices had a constant rank, we
plotted the errors in the box plot in figure \ref{fig:1dvar}. plotted the errors in the box plot in figure \ref{fig:1dvar}.
It is also noticeable, that although the \(7 \times 4\) and It is also noticeable, that although the \(7 \times 4\) and
\(4 \times 7\) grids have a higher variability, they do not perform \(4 \times 7\) grids have a higher \emph{variability}, they do not
better than the \(5 \times 5\) grid. Also the \(7 \times 4\) and \(4 \times 7\) perform better than the \(5 \times 5\) grid. Also the \(7 \times 4\) and
grids differ distinctly from each other with a mean\(\pm\)sigma of \(4 \times 7\) grids differ distinctly from each other with a
\(233.09 \pm 12.32\) for the former and \(286.32 \pm 22.36\) for the mean\(\pm\)sigma of \(233.09 \pm 12.32\) for the former and
latter, although they have the same number of control--points. This is \(286.32 \pm 22.36\) for the latter, although they have the same number
an indication of an impact a proper or improper grid--setup can have. We of control--points. This is an indication of an impact a proper or
do not draw scientific conclusions from these findings, as more research improper grid--setup can have. We do not draw scientific conclusions
on non-squared grids seems necessary. from these findings, as more research on non--squared grids seems
necessary.
Leaving the issue of the grid--layout aside we focused on grids having Leaving the issue of the grid--layout aside we focused on grids having
the same number of prototypes in every dimension. For the the same number of prototypes in every dimension. For the
\(5 \times 5\), \(7 \times 7\) and \(10 \times 10\) grids we found a \(5 \times 5\), \(7 \times 7\) and \(10 \times 10\) grids we found a
\emph{very strong} correlation (\(-r_S = 0.94, p = 0\)) between the \emph{very strong} correlation (\(-r_S = 0.94, p = 0\)) between the
variability and the evolutionary error. \emph{variability} and the evolutionary error.
\subsection{Regularity}\label{regularity-1} \subsection{Regularity}\label{regularity-1}
\begin{figure}[tbh] \begin{figure}[tbh]
\centering \centering
\includegraphics[width=\textwidth]{img/evolution1d/55_to_1010_steps.png} \includegraphics[width=\textwidth]{img/evolution1d/55_to_1010_steps.png}
\caption[Improvement potential and regularity vs. steps]{\newline \caption[Improvement potential and regularity against iterations]{\newline
Left: Improvement potential against steps until convergence\newline Left: \emph{Improvement potential} against number of iterations until convergence\newline
Right: Regularity against steps until convergence\newline Right: \emph{Regularity} against number of iterations until convergence\newline
Coloured by their grid--resolution, both with a linear fit over the whole Coloured by their grid--resolution, both with a linear fit over the whole
dataset.} dataset.}
\label{fig:1dreg} \label{fig:1dreg}
@ -1201,16 +1208,16 @@ $5 \times 5$ & $7 \times 4$ & $4 \times 7$ & $7 \times 7$ & $10 \times 10$\\
\hline \hline
$0.28$ ($0.0045$) & \textcolor{red}{$0.21$} ($0.0396$) & \textcolor{red}{$0.1$} ($0.3019$) & \textcolor{red}{$0.01$} ($0.9216$) & \textcolor{red}{$0.01$} ($0.9185$) $0.28$ ($0.0045$) & \textcolor{red}{$0.21$} ($0.0396$) & \textcolor{red}{$0.1$} ($0.3019$) & \textcolor{red}{$0.01$} ($0.9216$) & \textcolor{red}{$0.01$} ($0.9185$)
\end{tabular} \end{tabular}
\caption[Correlation 1D Regularity/Steps]{Spearman's correlation (and p-values) \caption[Correlation 1D \emph{regularity} against iterations]{Inverted Spearman's correlation (and p--values)
between regularity and convergence speed for the 1D function approximation between \emph{regularity} and number of iterations for the 1D function approximation
problem. problem.
\newline Note: Not significant results are marked in \textcolor{red}{red}. \newline Note: Not significant results are marked in \textcolor{red}{red}.
} }
\label{tab:1dreg} \label{tab:1dreg}
\end{table} \end{table}
Regularity should correspond to the convergence speed (measured in \emph{Regularity} should correspond to the convergence speed (measured
iteration--steps of the evolutionary algorithm), and is computed as in iteration--steps of the evolutionary algorithm), and is computed as
the inverse condition number \(\kappa(\vec{U})\) of the deformation--matrix. the inverse condition number \(\kappa(\vec{U})\) of the deformation--matrix.
As can be seen from table \ref{tab:1dreg}, we could only show a As can be seen from table \ref{tab:1dreg}, we could only show a
@ -1222,27 +1229,27 @@ datasets into account we even get a \emph{strong} correlation of
To explain this discrepancy we took a closer look at what caused these To explain this discrepancy we took a closer look at what caused these
high number of iterations. In figure \ref{fig:1dreg} we also plotted the high number of iterations. In figure \ref{fig:1dreg} we also plotted the
improvement-potential against the steps next to the regularity--plot. \emph{improvement potential} against the steps next to the
Our theory is that the \emph{very strong} correlation \emph{regularity}--plot. Our theory is that the \emph{very strong}
(\(-r_S = -0.82, p=0\)) between improvement--potential and number of correlation (\(-r_S = -0.82, p=0\)) between \emph{improvement potential}
iterations hints that the employed algorithm simply takes longer to and number of iterations hints that the employed algorithm simply takes
converge on a better solution (as seen in figure \ref{fig:1dvar} and longer to converge on a better solution (as seen in figure
\ref{fig:1dimp}) offsetting any gain the regularity--measurement could \ref{fig:1dvar} and \ref{fig:1dimp}) offsetting any gain the
achieve. regularity--measurement could achieve.
\subsection{Improvement Potential}\label{improvement-potential-1} \subsection{Improvement Potential}\label{improvement-potential-1}
\begin{figure}[ht] \begin{figure}[ht]
\centering \centering
\includegraphics[width=0.8\textwidth]{img/evolution1d/55_to_1010_improvement-vs-evo-error.png} \includegraphics[width=0.8\textwidth]{img/evolution1d/55_to_1010_improvement-vs-evo-error.png}
\caption[Correlation 1D Improvement vs. Error]{Improvement potential plotted \caption[Correlation 1D Improvement vs. Error]{*Improvement potential* plotted
against the error yielded by the evolutionary optimization for different against the error yielded by the evolutionary optimization for different
grid--resolutions} grid--resolutions}
\label{fig:1dimp} \label{fig:1dimp}
\end{figure} \end{figure}
The improvement potential should correlate to the quality of the The \emph{improvement potential} should correlate to the quality of the
fitting--result. We plotted the results for the tested grid-sizes fitting--result. We plotted the results for the tested grid--sizes
\(5 \times 5\), \(7 \times 7\) and \(10 \times 10\) in figure \(5 \times 5\), \(7 \times 7\) and \(10 \times 10\) in figure
\ref{fig:1dimp}. We tested the \(4 \times 7\) and \(7 \times 4\) grids \ref{fig:1dimp}. We tested the \(4 \times 7\) and \(7 \times 4\) grids
as well, but omitted them from the plot. as well, but omitted them from the plot.
@ -1280,7 +1287,7 @@ Initially we set up the correspondences \(\vec{c_T(\dots)}\) and
other model. We then calculate the analytical solution given these other model. We then calculate the analytical solution given these
correspondences via \(\vec{P^{*}} = \vec{U^+}\vec{T}\), and also use the correspondences via \(\vec{P^{*}} = \vec{U^+}\vec{T}\), and also use the
first solution as guessed gradient for the calculation of the first solution as guessed gradient for the calculation of the
\emph{improvement--potential}, as the optimal solution is not known. We \emph{improvement potential}, as the optimal solution is not known. We
then let the evolutionary algorithm run up within \(1.05\) times the then let the evolutionary algorithm run up within \(1.05\) times the
error of this solution and afterwards recalculate the correspondences error of this solution and afterwards recalculate the correspondences
\(\vec{c_T(\dots)}\) and \(\vec{c_S(\dots)}\). \(\vec{c_T(\dots)}\) and \(\vec{c_S(\dots)}\).
@ -1310,12 +1317,12 @@ regularization--effect wears off.
The grid we use for our experiments is just very coarse due to The grid we use for our experiments is just very coarse due to
computational limitations. We are not interested in a good computational limitations. We are not interested in a good
reconstruction, but an estimate if the mentioned evolvability criteria reconstruction, but an estimate if the mentioned evolvability--criteria
are good. are good.
In figure \ref{fig:setup3d} we show an example setup of the scene with a In figure \ref{fig:setup3d} we show an example setup of the scene with a
\(4\times 4\times 4\)--grid. Identical to the 1--dimensional scenario \(4\times 4\times 4\)--grid. Identical to the 1--dimensional scenario
before, we create a regular grid and move the control-points in the before, we create a regular grid and move the control--points in the
exact same random manner between their neighbours as described in exact same random manner between their neighbours as described in
section \ref{sec:proc:1d}, but in three instead of two section \ref{sec:proc:1d}, but in three instead of two
dimensions\footnote{Again, we flip the signs for the edges, if necessary dimensions\footnote{Again, we flip the signs for the edges, if necessary
@ -1332,9 +1339,9 @@ Right: A $4 \times 4 \times 7$ grid that we expect to perform worse.}
As is clearly visible from figure \ref{fig:3dgridres}, the target--model As is clearly visible from figure \ref{fig:3dgridres}, the target--model
has many vertices in the facial area, at the ears and in the has many vertices in the facial area, at the ears and in the
neck--region. Therefore we chose to increase the grid-resolutions for neck--region. Therefore we chose to increase the grid--resolutions for
our tests in two different dimensions and see how well the criteria our tests in two different dimensions and see how well the criteria
predict a suboptimal placement of these control-points. predict a suboptimal placement of these control--points.
\section{Results of 3D Function \section{Results of 3D Function
Approximation}\label{results-of-3d-function-approximation} Approximation}\label{results-of-3d-function-approximation}
@ -1342,8 +1349,8 @@ Approximation}\label{results-of-3d-function-approximation}
In the 3D--Approximation we tried to evaluate further on the impact of In the 3D--Approximation we tried to evaluate further on the impact of
the grid--layout to the overall criteria. As the target--model has many the grid--layout to the overall criteria. As the target--model has many
vertices concentrated in the facial area we start from a vertices concentrated in the facial area we start from a
\(4 \times 4 \times 4\) grid and only increase the number of control \(4 \times 4 \times 4\) grid and only increase the number of
points in one dimension, yielding a resolution of control--points in one dimension, yielding a resolution of
\(7 \times 4 \times 4\) and \(4 \times 4 \times 7\) respectively. We \(7 \times 4 \times 4\) and \(4 \times 4 \times 7\) respectively. We
visualized those two grids in figure \ref{fig:3dgridres}. visualized those two grids in figure \ref{fig:3dgridres}.
@ -1374,10 +1381,10 @@ $4 \times 4 \times \mathrm{X}$ & $\mathrm{X} \times 4 \times 4$ & $\mathrm{Y} \t
\hline \hline
0.89 (0) & 0.9 (0) & 0.91 (0) & 0.94 (0) 0.89 (0) & 0.9 (0) & 0.91 (0) & 0.94 (0)
\end{tabular} \end{tabular}
\caption[Correlation between variability and fitting error for 3D]{Correlation \caption[Correlation between *variability* and fitting error for 3D]{Correlation
between variability and fitting error for the 3D fitting scenario.\newline between *variability* and fitting error for the 3D fitting scenario.\newline
Displayed are the negated Spearman coefficients with the corresponding p-values Displayed are the negated Spearman coefficients with the corresponding p--values
in brackets for three cases of increasing variability ($\mathrm{X} \in [4,5,7], in brackets for three cases of increasing *variability* ($\mathrm{X} \in [4,5,7],
\mathrm{Y} \in [4,5,6]$). \mathrm{Y} \in [4,5,6]$).
\newline Note: Not significant results are marked in \textcolor{red}{red}.} \newline Note: Not significant results are marked in \textcolor{red}{red}.}
\label{tab:3dvar} \label{tab:3dvar}
@ -1399,40 +1406,42 @@ we anticipated, which shows that the evolutionary algorithm we employed
is capable of correcting a purposefully created \glqq bad\grqq ~grid. is capable of correcting a purposefully created \glqq bad\grqq ~grid.
Also this confirms, that in our cases the number of control--points is Also this confirms, that in our cases the number of control--points is
more important for quality than their placement, which is captured by more important for quality than their placement, which is captured by
the variability via the rank of the deformation--matrix. the \emph{variability} via the rank of the deformation--matrix.
Overall the correlation between \emph{variability} and fitness--error
were \emph{significant} and showed a \emph{very strong} correlation in
all our tests. The detailed correlation--coefficients are given in table
\ref{tab:3dvar} alongside their p--values.
As introduced in section \ref{sec:impl:grid} and visualized in figure
\ref{fig:enoughCP}, we know, that not all control--points have to
necessarily contribute to the parametrization of our 3D--model. Because
we are starting from a sphere, some control--points are too far away
from the surface to contribute to the deformation at all.
One can already see in 2D in figure \ref{fig:enoughCP}, that this effect
starts with a regular \(9 \times 9\) grid on a perfect circle. To make
sure we observe this, we evaluated the \emph{variability} for 100
randomly moved \(10 \times 10 \times 10\) grids on the sphere we start
out with.
\begin{figure}[hbt] \begin{figure}[hbt]
\centering \centering
\includegraphics[width=0.8\textwidth]{img/evolution3d/variability2_boxplot.png} \includegraphics[width=0.8\textwidth]{img/evolution3d/variability2_boxplot.png}
\caption[Histogram of ranks of high--resolution deformation--matrices]{ \caption[Histogram of ranks of high--resolution deformation--matrices]{
Histogram of ranks of various $10 \times 10 \times 10$ grids with $1000$ Histogram of ranks of various $10 \times 10 \times 10$ grids with $1000$
control--points each showing in this case how many control points are actually control--points each showing in this case how many control--points are actually
used in the calculations. used in the calculations.
} }
\label{fig:histrank3d} \label{fig:histrank3d}
\end{figure} \end{figure}
Overall the correlation between variability and fitness--error were As the \emph{variability} is defined by
\emph{significant} and showed a \emph{very strong} correlation in all \(\frac{\mathrm{rank}(\vec{U})}{n}\) we can easily recover the rank of
our tests. The detailed correlation--coefficients are given in table the deformation--matrix \(\vec{U}\). The results are shown in the
\ref{tab:3dvar} alongside their p--values. histogram in figure \ref{fig:histrank3d}. Especially in the centre of
the sphere and in the corners of our grid we effectively lose
As introduced in section \ref{sec:impl:grid} and visualized in figure control--points for our parametrization.
\ref{fig:enoughCP}, we know, that not all control points have to
necessarily contribute to the parametrization of our 3D--model. Because
we are starting from a sphere, some control-points are too far away from
the surface to contribute to the deformation at all.
One can already see in 2D in figure \ref{fig:enoughCP}, that this effect
starts with a regular \(9 \times 9\) grid on a perfect circle. To make
sure we observe this, we evaluated the variability for 100 randomly
moved \(10 \times 10 \times 10\) grids on the sphere we start out with.
As the variability is defined by \(\frac{\mathrm{rank}(\vec{U})}{n}\) we
can easily recover the rank of the deformation--matrix \(\vec{U}\). The
results are shown in the histogram in figure \ref{fig:histrank3d}.
Especially in the centre of the sphere and in the corners of our grid we
effectively lose control--points for our parametrization.
This of course yields a worse error than when those control--points would This of course yields a worse error than when those control--points would
be put to use and one should expect a loss in quality evident by a be put to use and one should expect a loss in quality evident by a
@ -1440,7 +1449,7 @@ higher reconstruction--error opposed to a grid where they are used.
Sadly we could not run an in--depth test on this due to computational Sadly we could not run an in--depth test on this due to computational
limitations. limitations.
Nevertheless this hints at the notion, that variability is a good Nevertheless this hints at the notion, that \emph{variability} is a good
measure for the overall quality of a fit. measure for the overall quality of a fit.
\subsection{Regularity}\label{regularity-2} \subsection{Regularity}\label{regularity-2}
@ -1468,21 +1477,21 @@ $4 \times 4 \times 4$ & $4 \times 4 \times 5$ & $4 \times 4 \times 7$ & $4 \time
\cline{2-4} \cline{2-4}
\multicolumn{3}{c}{} & all: 0.15 (0) \T \multicolumn{3}{c}{} & all: 0.15 (0) \T
\end{tabular} \end{tabular}
\caption[Correlation between regularity and iterations for 3D]{Correlation \caption[Correlation between *regularity* and iterations for 3D]{Correlation
between regularity and number of iterations for the 3D fitting scenario. between *regularity* and number of iterations for the 3D fitting scenario.
Displayed are the negated Spearman coefficients with the corresponding p--values Displayed are the negated Spearman coefficients with the corresponding p--values
in brackets for various given grids ($\mathrm{X} \in [4,5,7], \mathrm{Y} \in [4,5,6]$). in brackets for various given grids ($\mathrm{X} \in [4,5,7], \mathrm{Y} \in [4,5,6]$).
\newline Note: Not significant results are marked in \textcolor{red}{red}.} \newline Note: Not significant results are marked in \textcolor{red}{red}.}
\label{tab:3dreg} \label{tab:3dreg}
\end{table} \end{table}
Opposed to the predictions of variability our test on regularity gave a Opposed to the predictions of \emph{variability} our test on
mixed result --- similar to the 1D--case. \emph{regularity} gave a mixed result --- similar to the 1D--case.
In roughly half of the scenarios we have a \emph{significant}, but In roughly half of the scenarios we have a \emph{significant}, but
\emph{weak} to \emph{moderate} correlation between regularity and number \emph{weak} to \emph{moderate} correlation between \emph{regularity} and
of iterations. On the other hand in the scenarios where we increased the number of iterations. On the other hand in the scenarios where we
number of control--points, namely \(125\) for the increased the number of control--points, namely \(125\) for the
\(5 \times 5 \times 5\) grid and \(216\) for the \(6 \times 6 \times 6\) \(5 \times 5 \times 5\) grid and \(216\) for the \(6 \times 6 \times 6\)
grid we found a \emph{significant}, but \emph{weak} grid we found a \emph{significant}, but \emph{weak}
\textbf{anti}--correlation when taking all three tests into \textbf{anti}--correlation when taking all three tests into
@ -1491,14 +1500,14 @@ contradict the findings/trends for the sets with \(64\), \(80\), and
\(112\) control--points (first two rows of table \ref{tab:3dreg}). \(112\) control--points (first two rows of table \ref{tab:3dreg}).
Taking all results together we only find a \emph{very weak}, but Taking all results together we only find a \emph{very weak}, but
\emph{significant} link between regularity and the number of iterations \emph{significant} link between \emph{regularity} and the number of
needed for the algorithm to converge. iterations needed for the algorithm to converge.
\begin{figure}[!htb] \begin{figure}[!htb]
\centering \centering
\includegraphics[width=\textwidth]{img/evolution3d/regularity_montage.png} \includegraphics[width=\textwidth]{img/evolution3d/regularity_montage.png}
\caption[Regularity for different 3D--grids]{ \caption[Regularity for different 3D--grids]{
Plots of regularity against number of iterations for various scenarios together Plots of *regularity* against number of iterations for various scenarios together
with a linear fit to indicate trends.} with a linear fit to indicate trends.}
\label{fig:resreg3d} \label{fig:resreg3d}
\end{figure} \end{figure}
@ -1509,10 +1518,10 @@ The regularity--criterion first behaves as we would like to, but then
switches to behave exactly opposite to our expectations, as can be seen switches to behave exactly opposite to our expectations, as can be seen
in the first three plots. While the number of control--points increases in the first three plots. While the number of control--points increases
from red to green to blue and the number of iterations decreases, the from red to green to blue and the number of iterations decreases, the
regularity seems to increase at first, but then decreases again on \emph{regularity} seems to increase at first, but then decreases again
higher grid--resolutions. on higher grid--resolutions.
This can be an artefact of the definition of regularity, as it is This can be an artefact of the definition of \emph{regularity}, as it is
defined by the inverse condition--number of the deformation--matrix defined by the inverse condition--number of the deformation--matrix
\(\vec{U}\), being the fraction \(\vec{U}\), being the fraction
\(\frac{\sigma_{\mathrm{min}}}{\sigma_{\mathrm{max}}}\) between the \(\frac{\sigma_{\mathrm{min}}}{\sigma_{\mathrm{max}}}\) between the
@ -1524,12 +1533,12 @@ small minimal right singular value occurring on higher grid--resolutions
seems likely the problem. seems likely the problem.
Adding to this we also noted, that in the case of the Adding to this we also noted, that in the case of the
\(10 \times 10 \times 10\)--grid the regularity was always \(0\), as a \(10 \times 10 \times 10\)--grid the \emph{regularity} was always \(0\),
non--contributing control-point yields a \(0\)--column in the as a non--contributing control--point yields a \(0\)--column in the
deformation--matrix, thus letting \(\sigma_\mathrm{min} = 0\). A better deformation--matrix, thus letting \(\sigma_\mathrm{min} = 0\). A better
definition for regularity (i.e.~using the smallest non--zero right definition for \emph{regularity} (i.e.~using the smallest non--zero
singular value) could solve this particular issue, but not fix the trend right singular value) could solve this particular issue, but not fix the
we noticed above. trend we noticed above.
\subsection{Improvement Potential}\label{improvement-potential-2} \subsection{Improvement Potential}\label{improvement-potential-2}
@ -1556,8 +1565,8 @@ $4 \times 4 \times 4$ & $4 \times 4 \times 5$ & $4 \times 4 \times 7$ & $4 \time
\cline{2-4} \cline{2-4}
\multicolumn{3}{c}{} & all: 0.95 (0) \T \multicolumn{3}{c}{} & all: 0.95 (0) \T
\end{tabular} \end{tabular}
\caption[Correlation between improvement--potential and fitting--error for 3D]{Correlation \caption[Correlation between *improvement potential* and fitting--error for 3D]{Correlation
between improvement--potential and fitting--error for the 3D fitting scenario. between *improvement potential* and fitting--error for the 3D fitting scenario.
Displayed are the negated Spearman coefficients with the corresponding p--values Displayed are the negated Spearman coefficients with the corresponding p--values
in brackets for various given grids ($\mathrm{X} \in [4,5,7], \mathrm{Y} \in [4,5,6]$). in brackets for various given grids ($\mathrm{X} \in [4,5,7], \mathrm{Y} \in [4,5,6]$).
\newline Note: Not significant results are marked in \textcolor{red}{red}.} \newline Note: Not significant results are marked in \textcolor{red}{red}.}
@ -1576,21 +1585,22 @@ gradients anyway.
\centering \centering
\includegraphics[width=\textwidth]{img/evolution3d/improvement_montage.png} \includegraphics[width=\textwidth]{img/evolution3d/improvement_montage.png}
\caption[Improvement potential for different 3D--grids]{ \caption[Improvement potential for different 3D--grids]{
Plots of improvement potential against error given by our fitness--function Plots of *improvement potential* against error given by our *fitness--function*
after convergence together with a linear fit of each of the plotted data to after convergence together with a linear fit of each of the plotted data to
indicate trends.} indicate trends.}
\label{fig:resimp3d} \label{fig:resimp3d}
\end{figure} \end{figure}
We plotted our findings on the improvement potential in a similar way as We plotted our findings on the \emph{improvement potential} in a similar
we did before with the regularity. In figure \ref{fig:resimp3d} one can way as we did before with the \emph{regularity}. In figure
clearly see the correlation and the spread within each setup and the \ref{fig:resimp3d} one can clearly see the correlation and the spread
behaviour when we increase the number of control--points. within each setup and the behaviour when we increase the number of
control--points.
Along with this we also give the Spearman--coefficients along with their Along with this we also give the Spearman--coefficients along with their
p--values in table \ref{tab:3dimp}. Within one scenario we only find a p--values in table \ref{tab:3dimp}. Within one scenario we only find a
\emph{weak} to \emph{moderate} correlation between the improvement \emph{weak} to \emph{moderate} correlation between the \emph{improvement
potential and the fitting error, but all findings (except for potential} and the fitting error, but all findings (except for
\(7 \times 4 \times 4\) and \(6 \times 6 \times 6\)) are significant. \(7 \times 4 \times 4\) and \(6 \times 6 \times 6\)) are significant.
If we take multiple datasets into account the correlation is \emph{very If we take multiple datasets into account the correlation is \emph{very
@ -1598,30 +1608,30 @@ strong} and \emph{significant}, which is good, as this functions as a
litmus--test, because the quality is naturally tied to the number of litmus--test, because the quality is naturally tied to the number of
control--points. control--points.
All in all the improvement potential seems to be a good and sensible All in all the \emph{improvement potential} seems to be a good and
measure of quality, even given gradients of varying quality. sensible measure of quality, even given gradients of varying quality.
Lastly, a small note on the behaviour of improvement potential and Lastly, a small note on the behaviour of \emph{improvement potential}
convergence speed, as we used this in the 1D case to argue, why the and convergence speed, as we used this in the 1D case to argue, why the
\emph{regularity} defied our expectations. As a contrast we wanted to \emph{regularity} defied our expectations. As a contrast we wanted to
show, that improvement potential cannot serve for good predictions of show, that \emph{improvement potential} cannot serve for good
the convergence speed. In figure \ref{fig:imp1d3d} we show improvement predictions of the convergence speed. In figure \ref{fig:imp1d3d} we
potential against number of iterations for both scenarios. As one can show \emph{improvement potential} against number of iterations for both
see, in the 1D scenario we have a \emph{strong} and \emph{significant} scenarios. As one can see, in the 1D scenario we have a \emph{strong}
correlation (with \(-r_S = -0.72\), \(p = 0\)), whereas in the 3D and \emph{significant} correlation (with \(-r_S = -0.72\), \(p = 0\)),
scenario we have the opposite \emph{significant} and \emph{strong} whereas in the 3D scenario we have the opposite \emph{significant} and
effect (with \(-r_S = 0.69\), \(p=0\)), so these correlations clearly \emph{strong} effect (with \(-r_S = 0.69\), \(p=0\)), so these
seem to be dependent on the scenario and are not suited for correlations clearly seem to be dependent on the scenario and are not
generalization. suited for generalization.
\begin{figure}[hbt] \begin{figure}[hbt]
\centering \centering
\includegraphics[width=\textwidth]{img/imp1d3d.png} \includegraphics[width=\textwidth]{img/imp1d3d.png}
\caption[Improvement potential and convergence speed for 1D and 3D--scenarios]{ \caption[Improvement potential and convergence speed\newline for 1D and 3D--scenarios]{
\newline \newline
Left: Improvement potential against convergence speed for the Left: *Improvement potential* against convergence speed for the
1D--scenario\newline 1D--scenario\newline
Right: Improvement potential against convergence speed for the 3D--scenario Right: *Improvement potential* against convergence speed for the 3D--scenario
} }
\label{fig:imp1d3d} \label{fig:imp1d3d}
\end{figure} \end{figure}
@ -1630,36 +1640,36 @@ Right: Improvement potential against convergence speed for the 3D--scnario
\label{sec:dis} \label{sec:dis}
In this thesis we took a look at the different criteria for evolvability In this thesis we took a look at the different criteria for
as introduced by Richter et al.\cite{anrichterEvol}, namely \emph{evolvability} as introduced by Richter et al.\cite{anrichterEvol},
\emph{variability}, \emph{regularity} and \emph{improvement potential} namely \emph{variability}, \emph{regularity} and \emph{improvement
under different setup--conditions. Where Richter et al. used \acf{RBF}, potential} under different setup--conditions. Where Richter et al. used
we employed \acf{FFD} to set up a low--complexity parametrization of a \acf{RBF}, we employed \acf{FFD} to set up a low--complexity
more complex vertex--mesh. parametrization of a more complex vertex--mesh.
In our findings we could show in the 1D--scenario, that there were In our findings we could show in the 1D--scenario, that there were
statistically significant very strong correlations between statistically \emph{significant} \emph{very strong} correlations between
\emph{variability and fitting error} (\(0.94\)) and \emph{variability and fitting error} (\(0.94\)) and \emph{improvement
\emph{improvement--potential and fitting error} (\(1.0\)) with potential and fitting error} (\(1.0\)) with comparable results than
comparable results than Richter et al. (with \(0.31\) to \(0.88\) for Richter et al. (with \(0.31\) to \(0.88\) for the former and \(0.75\) to
the former and \(0.75\) to \(0.99\) for the latter), whereas we found \(0.99\) for the latter), whereas we found only \emph{weak} correlations
only weak correlations for \emph{regularity and convergence--speed} for \emph{regularity and convergence--speed} (\(0.28\)) opposed to
(\(0.28\)) opposed to Richter et al. with \(0.39\) to Richter et al. with \(0.39\) to \(0.91\).\footnote{We only took
\(0.91\).\footnote{We only took statistically \emph{significant} results statistically \emph{significant} results into consideration when
into consideration when compiling these numbers. Details are given in compiling these numbers. Details are given in the respective chapters.}
the respective chapters.}
For the 3D--scenario our results show a very strong, significant For the 3D--scenario our results show a \emph{very strong},
correlation between \emph{variability and fitting error} with \(0.89\) \emph{significant} correlation between \emph{variability and fitting
to \(0.94\), which are pretty much in line with the findings of Richter error} with \(0.89\) to \(0.94\), which are pretty much in line with the
et al. (\(0.65\) to \(0.95\)). The correlation between \emph{improvement findings of Richter et al. (\(0.65\) to \(0.95\)). The correlation
potential and fitting error} behave similar, with our findings having a between \emph{improvement potential and fitting error} behave similar,
significant coefficient of \(0.3\) to \(0.95\) depending on the with our findings having a significant coefficient of \(0.3\) to
grid--resolution compared to the \(0.61\) to \(0.93\) from Richter et \(0.95\) depending on the grid--resolution compared to the \(0.61\) to
al. In the case of the correlation of \emph{regularity and convergence \(0.93\) from Richter et al. In the case of the correlation of
speed} we found very different (and often not significant) correlations \emph{regularity and convergence speed} we found very different (and
and anti--correlations ranging from \(-0.25\) to \(0.46\), whereas often not significant) correlations and anti--correlations ranging from
Richter et al. reported correlations between \(0.34\) to \(0.87\). \(-0.25\) to \(0.46\), whereas Richter et al. reported correlations
between \(0.34\) to \(0.87\).
Taking these results into consideration, one can say, that Taking these results into consideration, one can say, that
\emph{variability} and \emph{improvement potential} are very good \emph{variability} and \emph{improvement potential} are very good
@ -1683,14 +1693,11 @@ manipulation in \cite{anrichterEvol}, whereas we merely used an indirect
indirect manipulations, the usage of \acf{DM--FFD} could also work indirect manipulations, the usage of \acf{DM--FFD} could also work
better with the criteria we examined. This can also solve the problem of better with the criteria we examined. This can also solve the problem of
bad singular values for the \emph{regularity} as the incorporation of bad singular values for the \emph{regularity} as the incorporation of
the parametrization of the points on the surface, which are the the parametrization of the points on the surface --- which are the
essential part of a direct--manipulation, could cancel out a bad essential part of a direct--manipulation --- could cancel out a bad
control--grid as the bad control--points are never or negligibly used to control--grid as the bad control--points are never or negligibly used to
parametrize those surface--points. parametrize those surface--points.
\improvement[inline]{Bibliotheksverzeichnis links anpassen. DOI überschreibt
Direktlinks des Autors.}
% \backmatter % \backmatter
\cleardoublepage \cleardoublepage
@ -1725,10 +1732,10 @@ Direktlinks des Autors.}
% \addtocounter{chapter}{1} % \addtocounter{chapter}{1}
\newpage \newpage
% \listoftables % \listoftables
\listoftodos % \listoftodos
% \addcontentsline{toc}{chapter}{\protect\numberline{\thechapter}TODOs} % \addcontentsline{toc}{chapter}{\protect\numberline{\thechapter}TODOs}
% \addtocounter{chapter}{1} % \addtocounter{chapter}{1}
\newpage % \newpage
% \printindex % \printindex
%%%%%%%%%%%%%%% Erklaerung %%%%%%%%%%%%%%% %%%%%%%%%%%%%%% Erklaerung %%%%%%%%%%%%%%%

View File

@ -175,10 +175,10 @@ $body$
% \addtocounter{chapter}{1} % \addtocounter{chapter}{1}
\newpage \newpage
% \listoftables % \listoftables
\listoftodos % \listoftodos
% \addcontentsline{toc}{chapter}{\protect\numberline{\thechapter}TODOs} % \addcontentsline{toc}{chapter}{\protect\numberline{\thechapter}TODOs}
% \addtocounter{chapter}{1} % \addtocounter{chapter}{1}
\newpage % \newpage
% \printindex % \printindex
%%%%%%%%%%%%%%% Erklaerung %%%%%%%%%%%%%%% %%%%%%%%%%%%%%% Erklaerung %%%%%%%%%%%%%%%

View File

@ -14,7 +14,7 @@ set xtics norangelimit
set xtics () set xtics ()
set ytics border in scale 1,0.5 nomirror norotate autojustify set ytics border in scale 1,0.5 nomirror norotate autojustify
set title "Fitting Errors of 1D Function Approximation for various grids\n" set title "Fitting Errors of 1D Function Approximation for various grids\n"
set ylabel "Squared Error of Vertex-Difference" set ylabel "Fitting-Error according to fitness-function"
header ="`head -1 errors.csv | sed -s "s/\"//g" | sed -s "s/,/ /g"`" header ="`head -1 errors.csv | sed -s "s/\"//g" | sed -s "s/,/ /g"`"
set for [i=1:words(header)] xtics (word(header,i) i) set for [i=1:words(header)] xtics (word(header,i) i)

Binary file not shown.

Before

Width:  |  Height:  |  Size: 5.1 KiB

After

Width:  |  Height:  |  Size: 5.3 KiB