done!
parent 103b629b84
commit 18727a857d
(bibliography .bib file, filename not shown in this view)
@@ -69,7 +69,7 @@

 Acmid = {2079770},
 Doi = {10.1007/11844297_36},
-ISBN = {3-540-38990-3, 978-3-540-38990-3},
+ISBN = {978-3-540-38990-3},
 Location = {Reykjavik, Iceland},
 Numpages = {10},
 Url = {http://dx.doi.org/10.1007/11844297_36}
@@ -86,13 +86,14 @@
 Url = {https://www.researchgate.net/profile/Yaneer_Bar-Yam/publication/225104044_Complex_Engineered_Systems_A_New_Paradigm/links/59107f20a6fdccbfd57eb84d/Complex-Engineered-Systems-A-New-Paradigm.pdf}
 }

-@Article{anrichterEvol,
+@InProceedings{anrichterEvol,
 Title = {Evolvability as a Quality Criterion for Linear Deformation Representations in Evolutionary Optimization},
-Author = {Richter, Andreas and Achenbach, Jascha and Menzel, Stefan and Botsch, Mario},
+Author = {Richter, Andreas and Achenbach, Jascha and enzel, Stefan and Botsch, Mario},
 Year = {2016},
-Note = {\url{http://graphics.uni-bielefeld.de/publications/cec16.pdf}, \url{https://pub.uni-bielefeld.de/publication/2902698}},
+Note = {\url{http://graphics.uni-bielefeld.de/publications/disclaimer.php?dlurl=cec16.pdf}, \url{https://pub.uni-bielefeld.de/publication/2902698}},
+
 Booktitle = {IEEE Congress on Evolutionary Computation},
 Pages = {901--910},
 Location = {Vancouver, Canada},
 Publisher = {IEEE}
 }
@@ -100,12 +101,12 @@
 @InProceedings{richter2015evolvability,
 Title = {Evolvability of representations in complex system engineering: a survey},
 Author = {Richter, Andreas and Botsch, Mario and Menzel, Stefan},
-Booktitle = {Evolutionary Computation (CEC), 2015 IEEE Congress on},
+Booktitle = {2015 IEEE Congress on Evolutionary Computation (CEC)},
 Year = {2015},
 Organization = {IEEE},
 Pages = {1327--1335},

-Url = {http://www.graphics.uni-bielefeld.de/publications/cec15.pdf}
+Url = {http://www.graphics.uni-bielefeld.de/publications/disclaimer.php?dlurl=cec15.pdf}
 }

 @InBook{Rothlauf2006,
@@ -168,8 +169,6 @@
 Year = {2012},
 Number = {5},
 Volume = {27},
-
-Url = {http://jcst.ict.ac.cn:8080/jcst/EN/article/downloadArticleFile.do?attachType=PDF\&id=9543}
 }
 @article{giannelli2012thb,
 title={THB-splines: The truncated basis for hierarchical splines},
@@ -180,17 +179,17 @@
 pages={485--498},
 year={2012},
 publisher={Elsevier},
-url={https://pdfs.semanticscholar.org/a858/aa68da617ad9d41de021f6807cc422002258.pdf},
+note={\url{https://pdfs.semanticscholar.org/a858/aa68da617ad9d41de021f6807cc422002258.pdf}},
 doi={10.1016/j.cagd.2012.03.025},
 }
 @article{brunet2010contributions,
 title={Contributions to parametric image registration and 3d surface reconstruction},
 author={Brunet, Florent},
-journal={European Ph. D. in Computer Vision, Universit{\'e} dAuvergne, Cl{\'e}rmont-Ferrand, France, and Technische Universitat Munchen, Germany},
+journal={European Ph. D. in Computer Vision, Universit{\'e} dAuvergne, Cl{\'e}rmont-Ferrand, France, and Technische Universität München, Germany},
 year={2010},
 url={http://www.brnt.eu/phd/}
 }
-@article{aschenbach2015,
+@InProceedings{aschenbach2015,
 author = {Achenbach, Jascha and Zell, Eduard and Botsch, Mario},
 booktitle = {Vision, Modeling \& Visualization},
 journal = {Proceedings of Vision, Modeling and Visualization},
@@ -199,7 +198,7 @@
 publisher = {Eurographics Association},
 title = {Accurate Face Reconstruction through Anisotropic Fitting and Eye Correction},
 year = {2015},
-url = {http://graphics.uni-bielefeld.de/publications/disclaimer.php?dlurl=vmv15.pdf},
+note = {\url{http://graphics.uni-bielefeld.de/publications/disclaimer.php?dlurl=vmv15.pdf}},
 ISBN = {978-3-905674-95-8},
 }
 @article{hauke2011comparison,
@@ -247,7 +246,7 @@
 publisher={IEEE},
 url={https://www.researchgate.net/profile/Marc_Schoenauer/publication/223460374_Parameter_Control_in_Evolutionary_Algorithms/links/545766440cf26d5090a9b951.pdf},
 }
-@article{rechenberg1973evolutionsstrategie,
+@book{rechenberg1973evolutionsstrategie,
 title={Evolutionsstrategie Optimierung technischer Systeme nach Prinzipien der biologischen Evolution},
 author={Rechenberg, Ingo},
 year={1973},
(second file: LaTeX title page, filename not shown in this view)
@@ -44,22 +44,23 @@
 \vspace*{\stretch{4}}

 \begin{center}
-\hspace{0.99cm} {\huge\bfseries Evaluation of the Performance\\[4mm]
-\hspace{0.99cm} of Randomized FFD Control Grids}\\[28mm]
-\hspace{0.99cm} {\LARGE Master Thesis}\\[3mm]
-\hspace{0.99cm} {\Large {\normalsize at the}\\[4mm]
-\hspace{0.99cm} AG Computer Graphics}\\[2mm]
-\hspace{0.99cm} at the Faculty of Technology\\
-\hspace{0.99cm} of Bielefeld University\\[5mm]
-\hspace{0.99cm} {\Large by}\\[5mm]
-\hspace{0.99cm} {\LARGE Stefan Dresselhaus}\\[8mm]
-\hspace{0.99cm} {\large \today}
+\hspace{1.5cm} {\huge\bfseries Evaluation of the Performance\\[-3mm]
+\hspace{1.5cm} of Randomized\\[4mm]
+\hspace{1.5cm} FFD Control Grids}\\[25mm]
+\hspace{1.5cm} {\LARGE Master Thesis}\\
+\hspace{1.5cm} {\Large {\normalsize at the}\\
+\hspace{1.5cm} AG Computer Graphics}\\[2mm]
+\hspace{1.5cm} at the Faculty of Technology\\
+\hspace{1.5cm} of Bielefeld University\\[3mm]
+\hspace{1.5cm} {\Large by}\\[3mm]
+\hspace{1.5cm} {\LARGE Stefan Dresselhaus}\\[5mm]
+\hspace{1.5cm} {\large \today}
 \end{center}
 \vspace*{\stretch{2}}
 \begin{center}
 \begin{tabular}{lrl}
-\hspace{0.99cm} Supervisor:~&Prof.~Dr.~&Mario Botsch\\
-\hspace{0.99cm} &Dipl.~Math.~&Andreas~Richter
+\hspace{1.5cm} Supervisor:~&Prof.~Dr.~&Mario Botsch\\[-5mm]
+\hspace{1.5cm} &Dipl.~Math.~&Andreas~Richter
 \end{tabular}
 \end{center}
 \vspace*{\stretch{.2}}
(binary image file changed, not shown: 55 KiB before, 59 KiB after)
enoughCP.svg
@@ -15,7 +15,10 @@
 id="svg2"
 version="1.1"
 inkscape:version="0.91 r13725"
-sodipodi:docname="enoughCP.svg">
+sodipodi:docname="enoughCP.svg"
+inkscape:export-filename="/home/sdressel/git/graphene/ausarbeitung/arbeit/img/enoughCP.png"
+inkscape:export-xdpi="150"
+inkscape:export-ydpi="150">
 <defs
 id="defs4" />
 <sodipodi:namedview
@@ -26,7 +29,7 @@
 inkscape:pageopacity="0.0"
 inkscape:pageshadow="2"
 inkscape:zoom="1.979899"
-inkscape:cx="128.58458"
+inkscape:cx="127.54562"
 inkscape:cy="179.18795"
 inkscape:document-units="px"
 inkscape:current-layer="layer1"
@@ -39,9 +42,9 @@
 fit-margin-right="0"
 fit-margin-bottom="0"
 inkscape:window-width="1920"
-inkscape:window-height="1141"
-inkscape:window-x="1680"
-inkscape:window-y="0"
+inkscape:window-height="1015"
+inkscape:window-x="1920"
+inkscape:window-y="36"
 inkscape:window-maximized="1" />
 <metadata
 id="metadata7">
@@ -51,7 +54,7 @@
 <dc:format>image/svg+xml</dc:format>
 <dc:type
 rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
-<dc:title></dc:title>
+<dc:title />
 </cc:Work>
 </rdf:RDF>
 </metadata>
@@ -211,13 +214,13 @@
 cy="578.05811"
 r="5" />
 <circle
-style="opacity:1;fill:#d4aa00;fill-opacity:1;stroke:#000000"
+style="opacity:1;fill:#ff0000;fill-opacity:1;stroke:#000000"
 id="path4138-5-9"
 cx="367.18387"
 cy="578.05811"
 r="5" />
 <circle
-style="opacity:1;fill:#d4aa00;fill-opacity:1;stroke:#000000"
+style="opacity:1;fill:#ff0000;fill-opacity:1;stroke:#000000"
 id="path4138-35-2"
 cx="403.05746"
 cy="578.05811"
@@ -265,7 +268,7 @@
 cy="612.91461"
 r="5" />
 <circle
-style="opacity:1;fill:#d4aa00;fill-opacity:1;stroke:#000000"
+style="opacity:1;fill:#ff0000;fill-opacity:1;stroke:#000000"
 id="path4138-7-9"
 cx="331.31027"
 cy="612.91461"
@@ -283,7 +286,7 @@
 cy="612.91461"
 r="5" />
 <circle
-style="opacity:1;fill:#d4aa00;fill-opacity:1;stroke:#000000"
+style="opacity:1;fill:#ff0000;fill-opacity:1;stroke:#000000"
 id="path4138-62-8"
 cx="438.93106"
 cy="612.91461"
@@ -325,7 +328,7 @@
 cy="647.77112"
 r="5" />
 <circle
-style="opacity:1;fill:#d4aa00;fill-opacity:1;stroke:#000000"
+style="opacity:1;fill:#ff0000;fill-opacity:1;stroke:#000000"
 id="path4138-7-3"
 cx="331.31027"
 cy="647.77112"
@@ -343,7 +346,7 @@
 cy="647.77112"
 r="5" />
 <circle
-style="opacity:1;fill:#d4aa00;fill-opacity:1;stroke:#000000"
+style="opacity:1;fill:#ff0000;fill-opacity:1;stroke:#000000"
 id="path4138-62-6"
 cx="438.93106"
 cy="647.77112"
@@ -391,13 +394,13 @@
 cy="682.62762"
 r="5" />
 <circle
-style="opacity:1;fill:#d4aa00;fill-opacity:1;stroke:#000000"
+style="opacity:1;fill:#ff0000;fill-opacity:1;stroke:#000000"
 id="path4138-5-5"
 cx="367.18387"
 cy="682.62762"
 r="5" />
 <circle
-style="opacity:1;fill:#d4aa00;fill-opacity:1;stroke:#000000"
+style="opacity:1;fill:#ff0000;fill-opacity:1;stroke:#000000"
 id="path4138-35-4"
 cx="403.05746"
 cy="682.62762"
arbeit/ma.md (325 changed lines)
@@ -49,19 +49,19 @@ etc.), the translation of the problem--domain into a simple parametric
 representation (the *genome*) can be challenging.

 This translation is often necessary as the target of the optimization may have
-too many degrees of freedom. In the example of an aerodynamic simulation of drag
-onto an object, those object--designs tend to have a high number of vertices to
-adhere to various requirements (visual, practical, physical, etc.). A simpler
-representation of the same object in only a few parameters that manipulate the
-whole in a sensible matter are desirable, as this often decreases the
-computation time significantly.
+too many degrees of freedom for a reasonable computation. In the example of an
+aerodynamic simulation of drag onto an object, those object--designs tend to
+have a high number of vertices to adhere to various requirements (visual,
+practical, physical, etc.). A simpler representation of the same object in only
+a few parameters that manipulate the whole in a sensible matter are desirable,
+as this often decreases the computation time significantly.

 Additionally one can exploit the fact, that drag in this case is especially
 sensitive to non--smooth surfaces, so that a smooth local manipulation of the
 surface as a whole is more advantageous than merely random manipulation of the
 vertices.

-The quality of such a low-dimensional representation in biological evolution is
+The quality of such a low--dimensional representation in biological evolution is
 strongly tied to the notion of *evolvability*\cite{wagner1996complex}, as the
 parametrization of the problem has serious implications on the convergence speed
 and the quality of the solution\cite{Rothlauf2006}.
@@ -80,14 +80,14 @@ One example of such a general representation of an object is to generate random
 points and represent vertices of an object as distances to these points --- for
 example via \acf{RBF}. If one (or the algorithm) would move such a point the
 object will get deformed only locally (due to the \ac{RBF}). As this results in
-a simple mapping from the parameter-space onto the object one can try out
+a simple mapping from the parameter--space onto the object one can try out
 different representations of the same object and evaluate which criteria may be
 suited to describe this notion of *evolvability*. This is exactly what Richter
 et al.\cite{anrichterEvol} have done.

 As we transfer the results of Richter et al.\cite{anrichterEvol} from using
 \acf{RBF} as a representation to manipulate geometric objects to the use of
-\acf{FFD} we will use the same definition for evolvability the original author
+\acf{FFD} we will use the same definition for *evolvability* the original author
 used, namely *regularity*, *variability*, and *improvement potential*. We
 introduce these term in detail in Chapter \ref{sec:intro:rvi}. In the original
 publication the author could show a correlation between these
@@ -95,7 +95,7 @@ evolvability--criteria with the quality and convergence speed of such
 optimization.

 We will replicate the same setup on the same objects but use \acf{FFD} instead of
-\acf{RBF} to create a local deformation near the control points and evaluate if
+\acf{RBF} to create a local deformation near the control--points and evaluate if
 the evolution--criteria still work as a predictor for *evolvability* of the
 representation given the different deformation scheme, as suspected in
 \cite{anrichterEvol}.
@@ -106,8 +106,8 @@ take an abstract look at the definition of \ac{FFD} for a one--dimensional line
 (in \ref{sec:back:ffdgood}).
 Then we establish some background--knowledge of evolutionary algorithms (in
 \ref{sec:back:evo}) and why this is useful in our domain (in
-\ref{sec:back:evogood}) followed by the definition of the different evolvability
-criteria established in \cite{anrichterEvol} (in \ref {sec:intro:rvi}).
+\ref{sec:back:evogood}) followed by the definition of the different
+evolvability--criteria established in \cite{anrichterEvol} (in \ref {sec:intro:rvi}).

 In Chapter \ref{sec:impl} we take a look at our implementation of \ac{FFD} and
 the adaptation for 3D--meshes that were used. Next, in Chapter \ref{sec:eval},
@@ -132,19 +132,20 @@ chapter \ref{3dffd}.

 The main idea of \ac{FFD} is to create a function $s : [0,1[^d \mapsto
 \mathbb{R}^d$ that spans a certain part of a vector--space and is only linearly
-parametrized by some special control points $p_i$ and an constant
+parametrized by some special control--points $p_i$ and an constant
 attribution--function $a_i(u)$, so
 $$
 s(\vec{u}) = \sum_i a_i(\vec{u}) \vec{p_i}
 $$
 can be thought of a representation of the inside of the convex hull generated by
-the control points where each point can be accessed by the right $u \in [0,1[^d$.
+the control--points where each position inside can be accessed by the right
+$u \in [0,1[^d$.

 \begin{figure}[!ht]
 \begin{center}
 \includegraphics[width=0.7\textwidth]{img/B-Splines.png}
 \end{center}
-\caption[Example of B-Splines]{Example of a parametrization of a line with
+\caption[Example of B--Splines]{Example of a parametrization of a line with
 corresponding deformation to generate a deformed objet}
 \label{fig:bspline}
 \end{figure}
@@ -184,7 +185,7 @@ $$\frac{\partial}{\partial u} N_{i,d,r}(u) = \frac{d}{\tau_{i+d} - \tau_i} N_{i,

 For a B--Spline
 $$s(u) = \sum_{i} N_{i,d,\tau_i}(u) p_i$$
-these derivations yield $\frac{\partial^d}{\partial u} s(u) = 0$.
+these derivations yield $\left(\frac{\partial}{\partial u}\right)^d s(u) = 0$.

 Another interesting property of these recursive polynomials is that they are
 continuous (given $d \ge 1$) as every $p_i$ gets blended in between $\tau_i$ and
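The Cox--de Boor recursion behind the basis functions $N_{i,d,\tau}$ is referenced in this hunk but not reproduced in the excerpt. A minimal sketch of that standard recursion, useful for checking the compact support and partition-of-unity properties numerically; the function names, the uniform knot vector and the sample parameter are assumptions, not code from the thesis:

```python
import numpy as np

def bspline_basis(i, d, u, tau):
    """Cox--de Boor recursion: i-th B-spline basis function of degree d at u."""
    if d == 0:
        # indicator of the half-open knot span [tau_i, tau_{i+1})
        return 1.0 if tau[i] <= u < tau[i + 1] else 0.0
    left = right = 0.0
    if tau[i + d] > tau[i]:
        left = (u - tau[i]) / (tau[i + d] - tau[i]) * bspline_basis(i, d - 1, u, tau)
    if tau[i + d + 1] > tau[i + 1]:
        right = (tau[i + d + 1] - u) / (tau[i + d + 1] - tau[i + 1]) \
                * bspline_basis(i + 1, d - 1, u, tau)
    return left + right

d, m = 2, 5                                  # degree and number of control points
tau = np.linspace(0.0, 1.0, m + d + 1)       # simple uniform knot vector (an assumption)
u = 0.4                                      # lies inside the inner spans [tau_d, tau_m]
weights = [bspline_basis(i, d, u, tau) for i in range(m)]
print(weights, sum(weights))                 # at most d+1 non-zero entries, summing to 1
```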
@@ -193,21 +194,21 @@ in every step of the recursion.

 This means that all changes are only a local linear combination between the
 control--point $p_i$ to $p_{i+d+1}$ and consequently this yields to the
-convex--hull--property of B-Splines --- meaning, that no matter how we choose
+convex--hull--property of B--Splines --- meaning, that no matter how we choose
 our coefficients, the resulting points all have to lie inside convex--hull of
 the control--points.

-For a given point $v_i$ we can then calculate the contributions
-$n_{i,j}~:=~N_{j,d,\tau}$ of each control point $p_j$ to get the
+For a given point $s_i$ we can then calculate the contributions
+$u_{i,j}~:=~N_{j,d,\tau}$ of each control point $p_j$ to get the
 projection from the control--point--space into the object--space:
 $$
-v_i = \sum_j n_{i,j} \cdot p_j = \vec{n}_i^{T} \vec{p}
+s_i = \sum_j u_{i,j} \cdot p_j = \vec{n}_i^{T} \vec{p}
 $$
 or written for all points at the same time:
 $$
-\vec{v} = \vec{N} \vec{p}
+\vec{s} = \vec{U} \vec{p}
 $$
-where $\vec{N}$ is the $n \times m$ transformation--matrix (later on called
+where $\vec{U}$ is the $n \times m$ transformation--matrix (later on called
 **deformation matrix**) for $n$ object--space--points and $m$ control--points.

 \begin{figure}[ht]
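To make the deformation matrix concrete: the sketch below (same Cox--de Boor recursion as in the previous sketch, with illustrative names and a 1D parameter per vertex, so only a sketch under those assumptions) assembles $\vec{U}$ with $u_{i,j} = N_{j,d,\tau}(u_i)$ and applies it as $\vec{s} = \vec{U}\vec{p}$.

```python
import numpy as np

def bspline_basis(i, d, u, tau):
    # Cox--de Boor recursion, identical to the previous sketch
    if d == 0:
        return 1.0 if tau[i] <= u < tau[i + 1] else 0.0
    a = b = 0.0
    if tau[i + d] > tau[i]:
        a = (u - tau[i]) / (tau[i + d] - tau[i]) * bspline_basis(i, d - 1, u, tau)
    if tau[i + d + 1] > tau[i + 1]:
        b = (tau[i + d + 1] - u) / (tau[i + d + 1] - tau[i + 1]) * bspline_basis(i + 1, d - 1, u, tau)
    return a + b

def deformation_matrix(us, m, d, tau):
    """n x m matrix U with U[i, j] = N_{j,d,tau}(u_i) for vertex parameters u_i."""
    U = np.zeros((len(us), m))
    for i, u in enumerate(us):
        for j in range(m):
            U[i, j] = bspline_basis(j, d, u, tau)
    return U

d, m = 2, 6
tau = np.linspace(0.0, 1.0, m + d + 1)
us = np.linspace(tau[d], tau[m], 50, endpoint=False)   # parameters of 50 sample vertices
U = deformation_matrix(us, m, d, tau)
p = np.random.rand(m)          # 1D control-point values
s = U @ p                      # object-space points depend linearly on p
print(U.shape, np.allclose(U.sum(axis=1), 1.0))        # (50, 6) True: partition of unity
```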
@@ -220,7 +221,7 @@ of the B--spline ($[k_0,k_4]$ on this figure), the B--Spline basis functions sum
 up to one (partition of unity). In this example, we use B--Splines of degree 2.
 The horizontal segment below the abscissa axis represents the domain of
 influence of the B--splines basis function, i.e. the interval on which they are
-not null. At a given point, there are at most $ d+1$ non-zero B--Spline basis
+not null. At a given point, there are at most $ d+1$ non--zero B--Spline basis
 functions (compact support).\grqq \newline
 Note, that Brunet starts his index at $-d$ opposed to our definition, where we
 start at $0$.
@@ -228,8 +229,8 @@ start at $0$.}
 \end{figure}

 Furthermore B--Spline--basis--functions form a partition of unity for all, but
-the first and last $d$ control-points\cite{brunet2010contributions}. Therefore
-we later on use the border-points $d+1$ times, such that $\sum_j n_{i,j} p_j = p_i$
+the first and last $d$ control--points\cite{brunet2010contributions}. Therefore
+we later on use the border--points $d+1$ times, such that $\sum_j u_{i,j} p_j = p_i$
 for these points.

 The locality of the influence of each control--point and the partition of unity
@@ -240,8 +241,8 @@ was beautifully pictured by Brunet, which we included here as figure
 \label{sec:back:ffdgood}

 The usage of \ac{FFD} as a tool for manipulating follows directly from the
-properties of the polynomials and the correspondence to the control points.
-Having only a few control points gives the user a nicer high--level--interface, as
+properties of the polynomials and the correspondence to the control--points.
+Having only a few control--points gives the user a nicer high--level--interface, as
 she only needs to move these points and the model follows in an intuitive
 manner. The deformation is smooth as the underlying polygon is smooth as well
 and affects as many vertices of the model as needed. Moreover the changes are
@@ -317,7 +318,7 @@ the *phenotypes* make certain behaviour observable (algorithmically through our
 *fitness--function*, biologically by the ability to survive and produce
 offspring). Any individual in our algorithm thus experience a biologically
 motivated life cycle of inheriting genes from the parents, modified by mutations
-occurring, performing according to a fitness--metric and generating offspring
+occurring, performing according to a fitness--metric, and generating offspring
 based on this. Therefore each iteration in the while--loop above is also often
 named generation.

@@ -346,7 +347,7 @@ The main algorithm just repeats the following steps:
 - **Selection** takes a selection--function $s : (I^\lambda \cup I^{\mu + \lambda},\Phi) \mapsto I^\mu$ that
 selects from the previously generated $I^\lambda$ children and optionally also
 the parents (denoted by the set $Q$ in the algorithm) using the
-fitness--function $\Phi$. The result of this operation is the next Population
+*fitness--function* $\Phi$. The result of this operation is the next Population
 of $\mu$ individuals.

 All these functions can (and mostly do) have a lot of hidden parameters that
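The mutation/selection loop described in this section can be written down in a few lines. The following is a generic $(\mu + \lambda)$ evolution strategy on a toy fitness function, not the optimizer used in the thesis; the population sizes, the Gaussian mutation and all names are assumptions for illustration.

```python
import numpy as np

rng = np.random.default_rng(0)

def fitness(x):
    # toy fitness: squared distance to a known optimum (lower is better)
    return np.sum((x - 1.5) ** 2)

def mu_plus_lambda(dim=10, mu=15, lam=100, sigma=0.1, generations=200):
    parents = rng.normal(size=(mu, dim))                   # initial population
    for _ in range(generations):
        idx = rng.integers(0, mu, size=lam)                # pick a parent per child
        children = parents[idx] + sigma * rng.normal(size=(lam, dim))   # mutation
        pool = np.vstack([parents, children])              # (mu + lambda)-selection:
        scores = np.array([fitness(ind) for ind in pool])  # parents compete with children
        parents = pool[np.argsort(scores)[:mu]]
    return parents[0], fitness(parents[0])

best, best_fitness = mu_plus_lambda()
print("best fitness after 200 generations:", best_fitness)
```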
@@ -370,7 +371,7 @@ also take ancestry, distance of genes or groups of individuals into account.
 \label{sec:back:evogood}

 The main advantage of evolutionary algorithms is the ability to find optima of
-general functions just with the help of a given fitness--function. Components
+general functions just with the help of a given *fitness--function*. Components
 and techniques for evolutionary algorithms are specifically known to
 help with different problems arising in the domain of
 optimization\cite{weise2012evolutionary}. An overview of the typical problems
@@ -383,13 +384,13 @@ are shown in figure \ref{fig:probhard}.
 \end{figure}

 Most of the advantages stem from the fact that a gradient--based procedure has
-only one point of observation from where it evaluates the next steps, whereas an
-evolutionary strategy starts with a population of guessed solutions. Because an
-evolutionary strategy can be modified according to the problem--domain (i.e. by
-the ideas given above) it can also approximate very difficult problems in an
-efficient manner and even self--tune parameters depending on the ancestry at
-runtime^[Some examples of this are explained in detail in
-\cite{eiben1999parameter}].
+usually only one point of observation from where it evaluates the next steps,
+whereas an evolutionary strategy starts with a population of guessed solutions.
+Because an evolutionary strategy can be modified according to the
+problem--domain (i.e. by the ideas given above) it can also approximate very
+difficult problems in an efficient manner and even self--tune parameters
+depending on the ancestry at runtime^[Some examples of this are explained in
+detail in \cite{eiben1999parameter}].

 If an analytic best solution exists and is easily computable (i.e. because the
 error--function is convex) an evolutionary algorithm is not the right choice.
@@ -421,23 +422,23 @@ coordinates
 $$
 \Delta \vec{S} = \vec{U} \cdot \Delta \vec{P}
 $$
-which is isomorphic to the former due to the linear correlation in the
-deformation. One can see in this way, that the way the deformation behaves lies
-solely in the entries of $\vec{U}$, which is why the three criteria focus on this.
+which is isomorphic to the former due to the linearity of the deformation. One
+can see in this way, that the way the deformation behaves lies solely in the
+entries of $\vec{U}$, which is why the three criteria focus on this.

 ### Variability

 In \cite{anrichterEvol} *variability* is defined as
 $$\mathrm{variability}(\vec{U}) := \frac{\mathrm{rank}(\vec{U})}{n},$$
 whereby $\vec{U}$ is the $n \times m$ deformation--Matrix used to map the $m$
-control points onto the $n$ vertices.
+control--points onto the $n$ vertices.

 Given $n = m$, an identical number of control--points and vertices, this
-quotient will be $=1$ if all control points are independent of each other and
+quotient will be $=1$ if all control--points are independent of each other and
 the solution is to trivially move every control--point onto a target--point.

-In praxis the value of $V(\vec{U})$ is typically $\ll 1$, because as
-there are only few control--points for many vertices, so $m \ll n$.
+In praxis the value of $V(\vec{U})$ is typically $\ll 1$, because there are only
+few control--points for many vertices, so $m \ll n$.

 This criterion should correlate to the degrees of freedom the given
 parametrization has. This can be seen from the fact, that
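Read literally, the definition above is a one-liner; the sketch below uses a random stand-in for $\vec{U}$ purely to show the computation (names and sizes are illustrative):

```python
import numpy as np

def variability(U):
    """variability(U) = rank(U) / n for an n x m deformation matrix."""
    n, _m = U.shape
    return np.linalg.matrix_rank(U) / n

U = np.random.rand(500, 25)   # stand-in: 500 vertices, 25 control points
print(variability(U))         # at most m/n = 0.05 here, i.e. typically well below 1
```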
@@ -459,7 +460,7 @@ value of the deformation--matrix $\vec{U}$.
 As we deform the given Object only based on the parameters as $\vec{p} \mapsto
 f(\vec{x} + \vec{U}\vec{p})$ this makes sure that $\|\vec{Up}\| \propto
 \|\vec{p}\|$ when $\kappa(\vec{U}) \approx 1$. The inversion of $\kappa(\vec{U})$
-is only performed to map the criterion--range to $[0..1]$, whereas $1$ is the
+is only performed to map the criterion--range to $[0..1]$, where $1$ is the
 optimal value and $0$ is the worst value.

 On the one hand this criterion should be characteristic for numeric
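The *regularity* computation is equally small: the inverse condition number is the ratio of the smallest to the largest right singular value of $\vec{U}$. A sketch with a random stand-in matrix, illustrative only:

```python
import numpy as np

def regularity(U):
    """1 / kappa(U) = sigma_min / sigma_max, which lies in [0, 1]."""
    sigma = np.linalg.svd(U, compute_uv=False)   # singular values, descending order
    return sigma[-1] / sigma[0]

U = np.random.rand(500, 25)   # stand-in deformation matrix
print(regularity(U))
```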
@@ -470,7 +471,7 @@ to the notion of locality\cite{weise2012evolutionary,thorhauer2014locality}.
 ### Improvement Potential

 In contrast to the general nature of *variability* and *regularity*, which are
-agnostic of the fitness--function at hand, the third criterion should reflect a
+agnostic of the *fitness--function* at hand, the third criterion should reflect a
 notion of the potential for optimization, taking a guess into account.

 Most of the times some kind of gradient $g$ is available to suggest a
@@ -509,7 +510,7 @@ As we have established in Chapter \ref{sec:back:ffd} we can define an
 \Delta_x(u) = \sum_i N_{i,d,\tau_i}(u) \Delta_x c_i
 \end{equation}

-Note that we only sum up the $\Delta$--displacements in the control points $c_i$ to get
+Note that we only sum up the $\Delta$--displacements in the control--points $c_i$ to get
 the change in position of the point we are interested in.

 In this way every deformed vertex is defined by
@@ -539,8 +540,8 @@ and do a gradient--descend to approximate the value of $u$ up to an $\epsilon$ o

 For this we employ the Gauss--Newton algorithm\cite{gaussNewton}, which
 converges into the least--squares solution. An exact solution of this problem is
-impossible most of the times, because we usually have way more vertices
-than control points ($\#v~\gg~\#c$).
+impossible most of the time, because we usually have way more vertices
+than control--points ($\#v~\gg~\#c$).

 ## Adaption of \ac{FFD} for a 3D--Mesh
 \label{3dffd}
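For the parameter search itself, the 1D case reduces to a scalar root-finding problem: find $u$ with $s(u) = x$. The sketch below runs the corresponding Gauss--Newton/Newton iteration on a stand-in monotone curve with a numerical derivative; the thesis applies the same idea to the B--Spline mapping using the analytic derivative given earlier, so every name and constant here is an assumption.

```python
import numpy as np

def fit_parameter(s, x_target, u0=0.5, eps=1e-10, max_iter=50):
    """Gauss--Newton on the scalar residual r(u) = s(u) - x_target.
    In 1D the step reduces to u <- u - r(u) / s'(u)."""
    u = u0
    for _ in range(max_iter):
        r = s(u) - x_target
        if abs(r) < eps:
            break
        h = 1e-6
        ds = (s(u + h) - s(u - h)) / (2 * h)   # numerical derivative of s
        u -= r / ds
    return u

s = lambda u: u ** 3 + 0.5 * u        # stand-in for the spline mapping on [0, 1[
u = fit_parameter(s, x_target=0.9)
print(u, s(u))                        # s(u) is ~0.9 up to the chosen epsilon
```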
@@ -550,7 +551,7 @@ chapter. But this time things get a bit more complicated. As we have a
 3--dimensional grid we may have a different amount of control--points in each
 direction.

-Given $n,m,o$ control points in $x,y,z$--direction each Point on the curve is
+Given $n,m,o$ control--points in $x,y,z$--direction each Point on the curve is
 defined by
 $$V(u,v,w) = \sum_i \sum_j \sum_k N_{i,d,\tau_i}(u) N_{j,d,\tau_j}(v) N_{k,d,\tau_k}(w) \cdot C_{ijk}.$$

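The trivariate sum over the control lattice is a plain tensor-product contraction. A sketch that evaluates $V(u,v,w)$ for one parameter triple, reusing the Cox--de Boor recursion from the earlier sketches; grid size, degree and knot vectors are assumptions:

```python
import numpy as np

def basis(i, d, u, tau):
    # Cox--de Boor recursion, as in the earlier sketches
    if d == 0:
        return 1.0 if tau[i] <= u < tau[i + 1] else 0.0
    a = b = 0.0
    if tau[i + d] > tau[i]:
        a = (u - tau[i]) / (tau[i + d] - tau[i]) * basis(i, d - 1, u, tau)
    if tau[i + d + 1] > tau[i + 1]:
        b = (tau[i + d + 1] - u) / (tau[i + d + 1] - tau[i + 1]) * basis(i + 1, d - 1, u, tau)
    return a + b

def ffd_point(u, v, w, C, d, tau_u, tau_v, tau_w):
    """V(u,v,w) = sum_ijk N_i(u) N_j(v) N_k(w) * C[i,j,k] for C of shape (n, m, o, 3)."""
    n, m, o, _ = C.shape
    Nu = np.array([basis(i, d, u, tau_u) for i in range(n)])
    Nv = np.array([basis(j, d, v, tau_v) for j in range(m)])
    Nw = np.array([basis(k, d, w, tau_w) for k in range(o)])
    return np.einsum('i,j,k,ijkx->x', Nu, Nv, Nw, C)

d = 2
knots = lambda count: np.linspace(0.0, 1.0, count + d + 1)
C = np.random.rand(4, 4, 4, 3)        # stand-in 4x4x4 control grid in 3D
print(ffd_point(0.5, 0.5, 0.5, C, d, knots(4), knots(4), knots(4)))
```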
@@ -624,13 +625,13 @@ beneficial for a good behaviour of the evolutionary algorithm.

 As mentioned in chapter \ref{sec:back:evo}, the way of choosing the
 representation to map the general problem (mesh--fitting/optimization in our
-case) into a parameter-space is very important for the quality and runtime of
+case) into a parameter--space is very important for the quality and runtime of
 evolutionary algorithms\cite{Rothlauf2006}.

 Because our control--points are arranged in a grid, we can accurately represent
 each vertex--point inside the grids volume with proper B--Spline--coefficients
 between $[0,1[$ and --- as a consequence --- we have to embed our object into it
-(or create constant "dummy"-points outside).
+(or create constant "dummy"--points outside).

 The great advantage of B--Splines is the local, direct impact of each
 control point without having a $1:1$--correlation, and a smooth deformation.
@@ -651,20 +652,20 @@ control--points.}
 One would normally think, that the more control--points you add, the better the
 result will be, but this is not the case for our B--Splines. Given any point
 $\vec{p}$ only the $2 \cdot (d-1)$ control--points contribute to the parametrization of
-that point^[Normally these are $d-1$ to each side, but at the boundaries the
-number gets increased to the inside to meet the required smoothness].
-This means, that a high resolution can have many control-points that are not
+that point^[Normally these are $d-1$ to each side, but at the boundaries border
+points get used multiple times to meet the number of points required].
+This means, that a high resolution can have many control--points that are not
 contributing to any point on the surface and are thus completely irrelevant to
 the solution.

-We illustrate this phenomenon in figure \ref{fig:enoughCP}, where the four red
+We illustrate this phenomenon in figure \ref{fig:enoughCP}, where the red
 central points are not relevant for the parametrization of the circle. This
 leads to artefacts in the deformation--matrix $\vec{U}$, as the columns
 corresponding to those control--points are $0$.

-This leads to useless increased complexity, as the parameters corresponding to
-those points will never have any effect, but a naive algorithm will still try to
-optimize them yielding numeric artefacts in the best and non--terminating or
+This also leads to useless increased complexity, as the parameters corresponding
+to those points will never have any effect, but a naive algorithm will still try
+to optimize them yielding numeric artefacts in the best and non--terminating or
 ill--defined solutions^[One example would be, when parts of an algorithm depend
 on the inverse of the minimal right singular value leading to a division by $0$.]
 at worst.
@@ -674,18 +675,18 @@ but this raises the question why they were introduced in the first place. We
 will address this in a special scenario in \ref{sec:res:3d:var}.

 For our tests we chose different uniformly sized grids and added noise
-onto each control-point^[For the special case of the outer layer we only applied
+onto each control--point^[For the special case of the outer layer we only applied
 noise away from the object, so the object is still confined in the convex hull
-of the control--points.] to simulate different starting-conditions.
+of the control--points.] to simulate different starting--conditions.

-# Scenarios for testing evolvability criteria using \ac{FFD}
+# Scenarios for testing evolvability--criteria using \ac{FFD}
 \label{sec:eval}

 In our experiments we use the same two testing--scenarios, that were also used
-by \cite{anrichterEvol}. The first scenario deforms a plane into a shape
-originally defined in \cite{giannelli2012thb}, where we setup control-points in
-a 2--dimensional manner and merely deform in the height--coordinate to get the
-resulting shape.
+by Richter et al.\cite{anrichterEvol} The first scenario deforms a plane into a shape
+originally defined by Giannelli et al.\cite{giannelli2012thb}, where we setup
+control--points in a 2--dimensional manner and merely deform in the
+height--coordinate to get the resulting shape.

 In the second scenario we increase the degrees of freedom significantly by using
 a 3--dimensional control--grid to deform a sphere into a face, so each control
@@ -717,7 +718,7 @@ including a wireframe--overlay of the vertices.}
 \label{fig:1dtarget}
 \end{figure}

-As the starting-plane we used the same shape, but set all
+As the starting--plane we used the same shape, but set all
 $z$--coordinates to $0$, yielding a flat plane, which is partially already
 correct.

@@ -728,10 +729,10 @@ of calculating the squared distances for each corresponding vertex
 \end{equation}
 where $t_i$ are the respective target--vertices to the parametrized
 source--vertices^[The parametrization is encoded in $\vec{U}$ and the initial
-position of the control points. See \ref{sec:ffd:adapt}] with the current
+position of the control--points. See \ref{sec:ffd:adapt}] with the current
 deformation--parameters $\vec{p} = (p_1,\dots, p_m)$. We can do this
 one--to--one--correspondence because we have exactly the same number of
-source and target-vertices do to our setup of just flattening the object.
+source and target--vertices do to our setup of just flattening the object.

 This formula is also the least--squares approximation error for which we
 can compute the analytic solution $\vec{p^{*}} = \vec{U^+}\vec{t}$, yielding us
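The analytic solution mentioned here is an ordinary linear least-squares fit. A sketch with stand-in data (sizes and names are illustrative; `numpy.linalg.lstsq` is the numerically preferable route to the same $\vec{p^{*}} = \vec{U^+}\vec{t}$):

```python
import numpy as np

n, m = 400, 25                      # stand-in: n vertices, m control points, 1D heights
U = np.random.rand(n, m)            # stand-in deformation matrix
t = np.random.rand(n)               # target z-coordinates

p_star = np.linalg.pinv(U) @ t      # p* = U^+ t, the least-squares optimum
print("sum of squared distances at p*:", np.sum((U @ p_star - t) ** 2))

p_lstsq, *_ = np.linalg.lstsq(U, t, rcond=None)   # same solution, solved directly
print(np.allclose(p_star, p_lstsq))
```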
@@ -762,16 +763,16 @@ these Models can be seen in figure \ref{fig:3dtarget}.
 Opposed to the 1D--case we cannot map the source and target--vertices in a
 one--to--one--correspondence, which we especially need for the approximation of
 the fitting--error. Hence we state that the error of one vertex is the distance
-to the closest vertex of the other model and sum up the error from the
-respective source and target.
+to the closest vertex of the respective other model and sum up the error from
+the source and target.

 We therefore define the *fitness--function* to be:

 \begin{equation}
 \mathrm{f}(\vec{P}) = \frac{1}{n} \underbrace{\sum_{i=1}^n \|\vec{c_T(s_i)} -
-\vec{s_i}\|_2^2}_{\textrm{source-to-target--distance}}
+\vec{s_i}\|_2^2}_{\textrm{source--to--target--distance}}
 + \frac{1}{m} \underbrace{\sum_{i=1}^m \|\vec{c_S(t_i)} -
-\vec{t_i}\|_2^2}_{\textrm{target-to-source--distance}}
+\vec{t_i}\|_2^2}_{\textrm{target--to--source--distance}}
 + \lambda \cdot \textrm{regularization}(\vec{P})
 \label{eq:fit3d}
 \end{equation}
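The two distance terms of this fitness can be evaluated with nearest-neighbour queries. A sketch of the source-to-target and target-to-source sums using a k-d tree on random stand-in point clouds, with the regularization term omitted; whether the thesis code actually uses a k-d tree is an assumption:

```python
import numpy as np
from scipy.spatial import cKDTree

def two_sided_error(source, target):
    """Mean squared closest-point distance in both directions (first two terms of f)."""
    d_st, _ = cKDTree(target).query(source)   # c_T(s_i): nearest target vertex per source vertex
    d_ts, _ = cKDTree(source).query(target)   # c_S(t_i): nearest source vertex per target vertex
    return np.mean(d_st ** 2) + np.mean(d_ts ** 2)

source = np.random.rand(1000, 3)   # stand-in deformed-sphere vertices
target = np.random.rand(1500, 3)   # stand-in face vertices
print(two_sided_error(source, target))
```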
@@ -787,7 +788,7 @@ $n \times m$--matrix of calculated coefficients for the \ac{FFD} --- analog to
 the 1D case --- and finally $\vec{P}$ being the $m \times 3$--matrix of the
 control--grid defining the whole deformation.

-As regularization-term we add a weighted Laplacian of the deformation that has
+As regularization--term we add a weighted Laplacian of the deformation that has
 been used before by Aschenbach et al.\cite[Section 3.2]{aschenbach2015} on
 similar models and was shown to lead to a more precise fit. The Laplacian
 \begin{equation}
@@ -812,7 +813,7 @@ ill--defined grids mentioned in section \ref{sec:impl:grid}.
 To compare our results to the ones given by Richter et al.\cite{anrichterEvol},
 we also use Spearman's rank correlation coefficient. Opposed to other popular
 coefficients, like the Pearson correlation coefficient, which measures a linear
-relationship between variables, the Spearmans's coefficient assesses \glqq how
+relationship between variables, the Spearman's coefficient assesses \glqq how
 well an arbitrary monotonic function can describe the relationship between two
 variables, without making any assumptions about the frequency distribution of
 the variables\grqq\cite{hauke2011comparison}.
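The difference between the two coefficients is easy to see on synthetic data: a monotonic but non-linear relation gets a Spearman coefficient near $1$ while the Pearson coefficient stays noticeably lower. The data below is made up purely for illustration:

```python
import numpy as np
from scipy.stats import pearsonr, spearmanr

rng = np.random.default_rng(1)
x = rng.random(100)
y = np.exp(5 * x) + 0.1 * rng.random(100)   # monotonic, clearly non-linear relation

rho, p_rho = spearmanr(x, y)   # rank-based: close to 1 for any monotonic relation
r, p_r = pearsonr(x, y)        # linear: noticeably smaller here
print(f"Spearman r_S = {rho:.2f} (p = {p_rho:.3g}), Pearson r = {r:.2f}")
```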
@@ -846,18 +847,19 @@ well. We leave the parameters at their sensible defaults as further explained in
 \label{sec:proc:1d}

 For our setup we first compute the coefficients of the deformation--matrix and
-use then the formulas for *variability* and *regularity* to get our predictions.
+use the formulas for *variability* and *regularity* to get our predictions.
 Afterwards we solve the problem analytically to get the (normalized) correct
-gradient that we use as guess for the *improvement potential*. To check we also
-consider a distorted gradient $\vec{g}_{\mathrm{d}}$
+gradient that we use as guess for the *improvement potential*. To further test
+the *improvement potential* we also consider a distorted gradient
+$\vec{g}_{\mathrm{d}}$:
 $$
 \vec{g}_{\mathrm{d}} = \frac{\mu \vec{g}_{\mathrm{c}} + (1-\mu)\mathbb{1}}{\|\mu \vec{g}_{\mathrm{c}} + (1-\mu) \mathbb{1}\|}
 $$
 where $\mathbb{1}$ is the vector consisting of $1$ in every dimension,
 $\vec{g}_\mathrm{c} = \vec{p^{*}} - \vec{p}$ is the calculated correct gradient,
 and $\mu$ is used to blend between $\vec{g}_\mathrm{c}$ and $\mathbb{1}$. As
-we always start with a gradient of $p = \mathbb{0}$ this means shortens
-$\vec{g}_\mathrm{c} = \vec{p^{*}}$.
+we always start with a gradient of $p = \mathbb{0}$ this means we can shorten
+the definition of $\vec{g}_\mathrm{c}$ to $\vec{g}_\mathrm{c} = \vec{p^{*}}$.

 \begin{figure}[ht]
 \begin{center}
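The distorted gradient is just a normalized blend between the correct gradient and the all-ones vector; a sketch in which the random $\vec{g}_\mathrm{c}$ only stands in for $\vec{p^{*}}$:

```python
import numpy as np

def distorted_gradient(g_c, mu):
    """g_d = (mu * g_c + (1 - mu) * 1) / ||mu * g_c + (1 - mu) * 1||"""
    blended = mu * g_c + (1.0 - mu) * np.ones_like(g_c)
    return blended / np.linalg.norm(blended)

g_c = np.random.rand(25) - 0.5          # stand-in for the correct gradient p*
for mu in (1.0, 0.5, 0.0):
    g_d = distorted_gradient(g_c, mu)
    print(mu, np.linalg.norm(g_d))      # always unit length; mu = 1 keeps the direction of g_c
```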
@@ -870,9 +872,9 @@ random distortion to generate a testcase.}

 We then set up a regular 2--dimensional grid around the object with the desired
 grid resolutions. To generate a testcase we then move the grid--vertices
-randomly inside the x--y--plane. As self-intersecting grids get tricky to solve
-with our implemented newtons--method we avoid the generation of such
-self--intersecting grids for our testcases (see section \ref{3dffd}).
+randomly inside the x--y--plane. As self--intersecting grids get tricky to solve
+with our implemented newtons--method (see section \ref{3dffd}) we avoid the
+generation of such self--intersecting grids for our testcases.

 To achieve that we generated a gaussian distributed number with $\mu = 0, \sigma=0.25$
 and clamped it to the range $[-0.25,0.25]$. We chose such an $r \in [-0.25,0.25]$
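The clamped Gaussian offsets described here can be generated directly. The sketch below jitters a stand-in $7 \times 7$ grid; scaling the clamped $r$ by the cell spacing is an assumption (the excerpt cuts off before the exact scaling rule), and no self-intersection check is included:

```python
import numpy as np

rng = np.random.default_rng(42)

def clamped_gaussian(shape, sigma=0.25, bound=0.25):
    """Gaussian samples with mu = 0, sigma = 0.25, clamped to [-0.25, 0.25]."""
    return np.clip(rng.normal(0.0, sigma, size=shape), -bound, bound)

# stand-in 7x7 grid of 2D control points on [0, 1]^2, jittered inside the x-y-plane
grid = np.stack(np.meshgrid(np.linspace(0, 1, 7), np.linspace(0, 1, 7)), axis=-1)
r = clamped_gaussian(grid.shape)
noisy_grid = grid + r * (1.0 / 6.0)     # scale by the grid spacing (an assumption)
print(noisy_grid.shape)                 # (7, 7, 2)
```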
@@ -899,11 +901,11 @@ In the case of our 1D--Optimization--problem, we have the luxury of knowing the
 analytical solution to the given problem--set. We use this to experimentally
 evaluate the quality criteria we introduced before. As an evolutional
 optimization is partially a random process, we use the analytical solution as a
-stopping-criteria. We measure the convergence speed as number of iterations the
+stopping--criteria. We measure the convergence speed as number of iterations the
 evolutional algorithm needed to get within $1.05 \times$ of the optimal solution.

 We used different regular grids that we manipulated as explained in Section
-\ref{sec:proc:1d} with a different number of control points. As our grids have
+\ref{sec:proc:1d} with a different number of control--points. As our grids have
 to be the product of two integers, we compared a $5 \times 5$--grid with $25$
 control--points to a $4 \times 7$ and $7 \times 4$--grid with $28$
 control--points. This was done to measure the impact an \glqq improper\grqq \
@@ -924,7 +926,7 @@ Note that $7 \times 4$ and $4 \times 7$ have the same number of control--points.
 \label{fig:1dvar}
 \end{figure}

-Variability should characterize the potential for design space exploration and
+*Variability* should characterize the potential for design space exploration and
 is defined in terms of the normalized rank of the deformation matrix $\vec{U}$:
 $V(\vec{U}) := \frac{\textrm{rank}(\vec{U})}{n}$, whereby $n$ is the number of
 vertices.
@@ -933,27 +935,27 @@ grid), we have merely plotted the errors in the box plot in figure
 \ref{fig:1dvar}

 It is also noticeable, that although the $7 \times 4$ and $4 \times 7$ grids
-have a higher variability, they perform not better than the $5 \times 5$ grid.
+have a higher *variability*, they perform not better than the $5 \times 5$ grid.
 Also the $7 \times 4$ and $4 \times 7$ grids differ distinctly from each other
 with a mean$\pm$sigma of $233.09 \pm 12.32$ for the former and $286.32 \pm 22.36$ for the
 latter, although they have the same number of control--points. This is an
 indication of an impact a proper or improper grid--setup can have. We do not
-draw scientific conclusions from these findings, as more research on non-squared
+draw scientific conclusions from these findings, as more research on non--squared
 grids seem necessary.

 Leaving the issue of the grid--layout aside we focused on grids having the same
 number of prototypes in every dimension. For the $5 \times 5$, $7 \times 7$ and
 $10 \times 10$ grids we found a *very strong* correlation ($-r_S = 0.94, p = 0$)
-between the variability and the evolutionary error.
+between the *variability* and the evolutionary error.

 ### Regularity

 \begin{figure}[tbh]
 \centering
 \includegraphics[width=\textwidth]{img/evolution1d/55_to_1010_steps.png}
-\caption[Improvement potential and regularity vs. steps]{\newline
-Left: Improvement potential against steps until convergence\newline
-Right: Regularity against steps until convergence\newline
+\caption[Improvement potential and regularity against iterations]{\newline
+Left: *Improvement potential* against number of iterations until convergence\newline
+Right: *Regularity* against number of iterations until convergence\newline
 Coloured by their grid--resolution, both with a linear fit over the whole
 dataset.}
 \label{fig:1dreg}
@@ -966,15 +968,15 @@ $5 \times 5$ & $7 \times 4$ & $4 \times 7$ & $7 \times 7$ & $10 \times 10$\\
 \hline
 $0.28$ ($0.0045$) & \textcolor{red}{$0.21$} ($0.0396$) & \textcolor{red}{$0.1$} ($0.3019$) & \textcolor{red}{$0.01$} ($0.9216$) & \textcolor{red}{$0.01$} ($0.9185$)
 \end{tabular}
-\caption[Correlation 1D Regularity/Steps]{Spearman's correlation (and p-values)
-between regularity and convergence speed for the 1D function approximation
+\caption[Correlation 1D *regularity* against iterations]{Inverted Spearman's correlation (and p--values)
+between *regularity* and number of iterations for the 1D function approximation
 problem.
 \newline Note: Not significant results are marked in \textcolor{red}{red}.
 }
 \label{tab:1dreg}
 \end{table}

-Regularity should correspond to the convergence speed (measured in
+*Regularity* should correspond to the convergence speed (measured in
 iteration--steps of the evolutionary algorithm), and is computed as inverse
 condition number $\kappa(\vec{U})$ of the deformation--matrix.

@@ -986,9 +988,9 @@ correlation of $- r_S = -0.72, p = 0$, that is opposed to our expectations.

 To explain this discrepancy we took a closer look at what caused these high number
 of iterations. In figure \ref{fig:1dreg} we also plotted the
-improvement-potential against the steps next to the regularity--plot. Our theory
+*improvement potential* against the steps next to the *regularity*--plot. Our theory
 is that the *very strong* correlation ($-r_S = -0.82, p=0$) between
-improvement--potential and number of iterations hints that the employed
+*improvement potential* and number of iterations hints that the employed
 algorithm simply takes longer to converge on a better solution (as seen in
 figure \ref{fig:1dvar} and \ref{fig:1dimp}) offsetting any gain the
 regularity--measurement could achieve.
@@ -998,14 +1000,14 @@ regularity--measurement could achieve.
 \begin{figure}[ht]
 \centering
 \includegraphics[width=0.8\textwidth]{img/evolution1d/55_to_1010_improvement-vs-evo-error.png}
-\caption[Correlation 1D Improvement vs. Error]{Improvement potential plotted
+\caption[Correlation 1D Improvement vs. Error]{*Improvement potential* plotted
 against the error yielded by the evolutionary optimization for different
 grid--resolutions}
 \label{fig:1dimp}
 \end{figure}

-The improvement potential should correlate to the quality of the
-fitting--result. We plotted the results for the tested grid-sizes $5 \times 5$,
+The *improvement potential* should correlate to the quality of the
+fitting--result. We plotted the results for the tested grid--sizes $5 \times 5$,
 $7 \times 7$ and $10 \times 10$ in figure \ref{fig:1dimp}. We tested the
 $4 \times 7$ and $7 \times 4$ grids as well, but omitted them from the plot.

@@ -1035,7 +1037,7 @@ Initially we set up the correspondences $\vec{c_T(\dots)}$ and $\vec{c_S(\dots)}
 the respectively closest vertices of the other model. We then calculate the
 analytical solution given these correspondences via $\vec{P^{*}} = \vec{U^+}\vec{T}$,
 and also use the first solution as guessed gradient for the calculation of the
-*improvement--potential*, as the optimal solution is not known.
+*improvement potential*, as the optimal solution is not known.
 We then let the evolutionary algorithm run up within $1.05$ times the error of
 this solution and afterwards recalculate the correspondences $\vec{c_T(\dots)}$
 and $\vec{c_S(\dots)}$.
@@ -1063,11 +1065,11 @@ iterations until the regularization--effect wears off.

 The grid we use for our experiments is just very coarse due to computational
 limitations. We are not interested in a good reconstruction, but an estimate if
-the mentioned evolvability criteria are good.
+the mentioned evolvability--criteria are good.

 In figure \ref{fig:setup3d} we show an example setup of the scene with a
 $4\times 4\times 4$--grid. Identical to the 1--dimensional scenario before, we create a
-regular grid and move the control-points in the exact same random manner between
+regular grid and move the control--points in the exact same random manner between
 their neighbours as described in section \ref{sec:proc:1d}, but in three instead
 of two dimensions^[Again, we flip the signs for the edges, if necessary to have
 the object still in the convex hull.].
@@ -1083,16 +1085,16 @@ Right: A $4 \times 4 \times 7$ grid that we expect to perform worse.}

 As is clearly visible from figure \ref{fig:3dgridres}, the target--model has many
 vertices in the facial area, at the ears and in the neck--region. Therefore we
-chose to increase the grid-resolutions for our tests in two different dimensions
+chose to increase the grid--resolutions for our tests in two different dimensions
 and see how well the criteria predict a suboptimal placement of these
-control-points.
+control--points.

 ## Results of 3D Function Approximation

 In the 3D--Approximation we tried to evaluate further on the impact of the
 grid--layout to the overall criteria. As the target--model has many vertices in
 concentrated in the facial area we start from a $4 \times 4 \times 4$ grid and
-only increase the number of control points in one dimension, yielding a
+only increase the number of control--points in one dimension, yielding a
 resolution of $7 \times 4 \times 4$ and $4 \times 4 \times 7$ respectively. We
 visualized those two grids in figure \ref{fig:3dgridres}.

@@ -1121,10 +1123,10 @@ $4 \times 4 \times \mathrm{X}$ & $\mathrm{X} \times 4 \times 4$ & $\mathrm{Y} \t
 \hline
 0.89 (0) & 0.9 (0) & 0.91 (0) & 0.94 (0)
 \end{tabular}
-\caption[Correlation between variability and fitting error for 3D]{Correlation
-between variability and fitting error for the 3D fitting scenario.\newline
-Displayed are the negated Spearman coefficients with the corresponding p-values
-in brackets for three cases of increasing variability ($\mathrm{X} \in [4,5,7],
+\caption[Correlation between *variability* and fitting error for 3D]{Correlation
+between *variability* and fitting error for the 3D fitting scenario.\newline
+Displayed are the negated Spearman coefficients with the corresponding p--values
+in brackets for three cases of increasing *variability* ($\mathrm{X} \in [4,5,7],
 \mathrm{Y} \in [4,5,6]$).
 \newline Note: Not significant results are marked in \textcolor{red}{red}.}
 \label{tab:3dvar}
@@ -1143,37 +1145,37 @@ Interestingly both variants end up closer in terms of fitting error than we
 anticipated, which shows that the evolutionary algorithm we employed is capable
 of correcting a purposefully created \glqq bad\grqq \ grid. Also this confirms,
 that in our cases the number of control--points is more important for quality
-than their placement, which is captured by the variability via the rank of the
+than their placement, which is captured by the *variability* via the rank of the
 deformation--matrix.

+Overall the correlation between *variability* and fitness--error were
+*significant* and showed a *very strong* correlation in all our tests.
+The detailed correlation--coefficients are given in table \ref{tab:3dvar}
+alongside their p--values.
+
+As introduces in section \ref{sec:impl:grid} and visualized in figure
+\ref{fig:enoughCP}, we know, that not all control--points have to necessarily
+contribute to the parametrization of our 3D--model. Because we are starting from
+a sphere, some control--points are too far away from the surface to contribute
+to the deformation at all.
+
+One can already see in 2D in figure \ref{fig:enoughCP}, that this effect
+starts with a regular $9 \times 9$ grid on a perfect circle. To make sure we
+observe this, we evaluated the *variability* for 100 randomly moved $10 \times 10 \times 10$
+grids on the sphere we start out with.
+
 \begin{figure}[hbt]
 \centering
 \includegraphics[width=0.8\textwidth]{img/evolution3d/variability2_boxplot.png}
 \caption[Histogram of ranks of high--resolution deformation--matrices]{
 Histogram of ranks of various $10 \times 10 \times 10$ grids with $1000$
-control--points each showing in this case how many control points are actually
+control--points each showing in this case how many control--points are actually
 used in the calculations.
 }
 \label{fig:histrank3d}
 \end{figure}

-Overall the correlation between variability and fitness--error were
-*significant* and showed a *very strong* correlation in all our tests.
-The detailed correlation--coefficients are given in table \ref{tab:3dvar}
-alongside their p--values.
-
-As introduces in section \ref{sec:impl:grid} and visualized in figure
-\ref{fig:enoughCP}, we know, that not all control points have to necessarily
-contribute to the parametrization of our 3D--model. Because we are starting from
-a sphere, some control-points are too far away from the surface to contribute
-to the deformation at all.
-
-One can already see in 2D in figure \ref{fig:enoughCP}, that this effect
-starts with a regular $9 \times 9$ grid on a perfect circle. To make sure we
-observe this, we evaluated the variability for 100 randomly moved $10 \times 10 \times 10$
-grids on the sphere we start out with.
-
-As the variability is defined by $\frac{\mathrm{rank}(\vec{U})}{n}$ we can
+As the *variability* is defined by $\frac{\mathrm{rank}(\vec{U})}{n}$ we can
 easily recover the rank of the deformation--matrix $\vec{U}$. The results are
 shown in the histogram in figure \ref{fig:histrank3d}. Especially in the centre
 of the sphere and in the corners of our grid we effectively loose
@ -1184,7 +1186,7 @@ to use and one should expect a loss in quality evident by a higher
|
||||
reconstruction--error opposed to a grid where they are used. Sadly we could not
|
||||
run a in--depth test on this due to computational limitations.
|
||||
|
||||
Nevertheless this hints at the notion, that variability is a good measure for
|
||||
Nevertheless this hints at the notion, that *variability* is a good measure for
|
||||
the overall quality of a fit.
|
||||
|
||||
### Regularity
|
||||
@ -1212,19 +1214,19 @@ $4 \times 4 \times 4$ & $4 \times 4 \times 5$ & $4 \times 4 \times 7$ & $4 \time
|
||||
\cline{2-4}
|
||||
\multicolumn{3}{c}{} & all: 0.15 (0) \T
|
||||
\end{tabular}
|
||||
\caption[Correlation between regularity and iterations for 3D]{Correlation
|
||||
between regularity and number of iterations for the 3D fitting scenario.
|
||||
\caption[Correlation between *regularity* and iterations for 3D]{Correlation
|
||||
between *regularity* and number of iterations for the 3D fitting scenario.
|
||||
Displayed are the negated Spearman coefficients with the corresponding p--values
|
||||
in brackets for various given grids ($\mathrm{X} \in [4,5,7], \mathrm{Y} \in [4,5,6]$).
|
||||
\newline Note: Not significant results are marked in \textcolor{red}{red}.}
|
||||
\label{tab:3dreg}
|
||||
\end{table}
|
||||
|
||||
Opposed to the predictions of variability our test on regularity gave a mixed
|
||||
Opposed to the predictions of *variability* our test on *regularity* gave a mixed
|
||||
result --- similar to the 1D--case.
|
||||
|
||||
In roughly half of the scenarios we have a *significant*, but *weak* to *moderate*
|
||||
correlation between regularity and number of iterations. On the other hand in
|
||||
correlation between *regularity* and number of iterations. On the other hand in
|
||||
the scenarios where we increased the number of control--points, namely $125$ for
|
||||
the $5 \times 5 \times 5$ grid and $216$ for the $6 \times 6 \times 6$ grid we found
|
||||
a *significant*, but *weak* **anti**--correlation when taking all three tests into
|
||||
@ -1233,14 +1235,14 @@ findings/trends for the sets with $64$, $80$, and $112$ control--points
|
||||
(first two rows of table \ref{tab:3dreg}).
|
||||
|
||||
Taking all results together we only find a *very weak*, but *significant* link
|
||||
between regularity and the number of iterations needed for the algorithm to
|
||||
between *regularity* and the number of iterations needed for the algorithm to
|
||||
converge.
|
||||
|
||||
\begin{figure}[!htb]
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{img/evolution3d/regularity_montage.png}
|
||||
\caption[Regularity for different 3D--grids]{
|
||||
Plots of regularity against number of iterations for various scenarios together
|
||||
Plots of *regularity* against number of iterations for various scenarios together
|
||||
with a linear fit to indicate trends.}
|
||||
\label{fig:resreg3d}
|
||||
\end{figure}
|
||||
@ -1250,10 +1252,10 @@ the number of control--points helps the convergence--speeds. The
|
||||
regularity--criterion first behaves as we would like to, but then switches to
|
||||
behave exactly opposite to our expectations, as can be seen in the first three
|
||||
plots. While the number of control--points increases from red to green to blue
|
||||
and the number of iterations decreases, the regularity seems to increase at
|
||||
and the number of iterations decreases, the *regularity* seems to increase at
|
||||
first, but then decreases again on higher grid--resolutions.
|
||||
|
||||
This can be an artefact of the definition of regularity, as it is defined by the
|
||||
This can be an artefact of the definition of *regularity*, as it is defined by the
|
||||
inverse condition--number of the deformation--matrix $\vec{U}$, being the
|
||||
fraction $\frac{\sigma_{\mathrm{min}}}{\sigma_{\mathrm{max}}}$ between the
|
||||
least and greatest right singular value.
|
||||
@ -1264,9 +1266,9 @@ and so a small minimal right singular value occurring on higher
|
||||
grid--resolutions seems to be the likely problem.
|
||||
|
||||
Adding to this we also noted, that in the case of the $10 \times 10 \times
|
||||
10$--grid the regularity was always $0$, as a non--contributing control-point
|
||||
10$--grid the *regularity* was always $0$, as a non--contributing control--point
|
||||
yields a $0$--column in the deformation--matrix, thus letting
|
||||
$\sigma_\mathrm{min} = 0$. A better definition for regularity (i.e. using the
|
||||
$\sigma_\mathrm{min} = 0$. A better definition for *regularity* (i.e. using the
|
||||
smallest non--zero right singular value) could solve this particular issue, but
|
||||
not fix the trend we noticed above.
|
||||
|
||||
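A variant along these lines would simply ignore (numerically) zero singular values; the sketch below is purely illustrative and was not evaluated in this work.

```python
import numpy as np

def regularity_nonzero(U, eps=1e-12):
    """*Regularity* computed from the smallest non-zero right singular value,
    so zero-columns of non-contributing control-points no longer force it to 0."""
    sigma = np.linalg.svd(U, compute_uv=False)   # descending singular values
    nonzero = sigma[sigma > eps]
    return nonzero[-1] / nonzero[0] if len(nonzero) else 0.0
```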
@ -1295,8 +1297,8 @@ $4 \times 4 \times 4$ & $4 \times 4 \times 5$ & $4 \times 4 \times 7$ & $4 \time
|
||||
\cline{2-4}
|
||||
\multicolumn{3}{c}{} & all: 0.95 (0) \T
|
||||
\end{tabular}
|
||||
\caption[Correlation between improvement--potential and fitting--error for 3D]{Correlation
|
||||
between improvement--potential and fitting--error for the 3D fitting scenario.
|
||||
\caption[Correlation between *improvement potential* and fitting--error for 3D]{Correlation
|
||||
between *improvement potential* and fitting--error for the 3D fitting scenario.
|
||||
Displayed are the negated Spearman coefficients with the corresponding p--values
|
||||
in brackets for various given grids ($\mathrm{X} \in [4,5,7], \mathrm{Y} \in [4,5,6]$).
|
||||
\newline Note: Not significant results are marked in \textcolor{red}{red}.}
|
||||
@ -1314,20 +1316,20 @@ quality of such gradients anyway.
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{img/evolution3d/improvement_montage.png}
|
||||
\caption[Improvement potential for different 3D--grids]{
|
||||
Plots of improvement potential against error given by our fitness--function
|
||||
Plots of *improvement potential* against error given by our *fitness--function*
|
||||
after convergence together with a linear fit of each of the plotted data to
|
||||
indicate trends.}
|
||||
\label{fig:resimp3d}
|
||||
\end{figure}
|
||||
|
||||
We plotted our findings on the improvement potential in a similar way as we did
|
||||
before with the regularity. In figure \ref{fig:resimp3d} one can clearly see the
|
||||
We plotted our findings on the *improvement potential* in a similar way as we did
|
||||
before with the *regularity*. In figure \ref{fig:resimp3d} one can clearly see the
|
||||
correlation and the spread within each setup and the behaviour when we increase
|
||||
the number of control--points.
|
||||
|
||||
Along with this we also give the Spearman--coefficients and their
|
||||
p--values in table \ref{tab:3dimp}. Within one scenario we only find a *weak* to
|
||||
*moderate* correlation between the improvement potential and the fitting error,
|
||||
*moderate* correlation between the *improvement potential* and the fitting error,
|
||||
but all findings (except for $7 \times 4 \times 4$ and $6 \times 6 \times 6$)
|
||||
are significant.
|
||||
|
||||
@ -1335,14 +1337,14 @@ If we take multiple datasets into account the correlation is *very strong* and
|
||||
*significant*, which is good, as this functions as a litmus--test, because the
|
||||
quality is naturally tied to the number of control--points.
|
||||
|
||||
All in all the improvement potential seems to be a good and sensible measure of
|
||||
All in all the *improvement potential* seems to be a good and sensible measure of
|
||||
quality, even given gradients of varying quality.
|
||||
|
||||
Lastly, a small note on the behaviour of improvement potential and convergence
|
||||
Lastly, a small note on the behaviour of *improvement potential* and convergence
|
||||
speed, as we used this in the 1D case to argue why the *regularity* defied our
|
||||
expectations. As a contrast we wanted to show, that improvement potential cannot
|
||||
expectations. As a contrast we wanted to show, that *improvement potential* cannot
|
||||
serve for good predictions of the convergence speed. In figure
|
||||
\ref{fig:imp1d3d} we show improvement potential against number of iterations
|
||||
\ref{fig:imp1d3d} we show *improvement potential* against number of iterations
|
||||
for both scenarios. As one can see, in the 1D scenario we have a *strong*
|
||||
and *significant* correlation (with $-r_S = -0.72$, $p = 0$), whereas in the 3D
|
||||
scenario we have the opposite *significant* and *strong* effect (with
|
||||
@ -1352,11 +1354,11 @@ scenario and are not suited for generalization.
|
||||
\begin{figure}[hbt]
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{img/imp1d3d.png}
|
||||
\caption[Improvement potential and convergence speed for 1D and 3D--scenarios]{
|
||||
\caption[Improvement potential and convergence speed\newline for 1D and 3D--scenarios]{
|
||||
\newline
|
||||
Left: Improvement potential against convergence speed for the
|
||||
Left: *Improvement potential* against convergence speed for the
|
||||
1D--scenario\newline
|
||||
Right: Improvement potential against convergence speed for the 3D--scnario
|
||||
Right: *Improvement potential* against convergence speed for the 3D--scenario
|
||||
}
|
||||
\label{fig:imp1d3d}
|
||||
\end{figure}
|
||||
@ -1364,23 +1366,23 @@ Right: Improvement potential against convergence speed for the 3D--scnario
|
||||
# Discussion and outlook
|
||||
\label{sec:dis}
|
||||
|
||||
In this thesis we took a look at the different criteria for evolvability as
|
||||
In this thesis we took a look at the different criteria for *evolvability* as
|
||||
introduced by Richter et al.\cite{anrichterEvol}, namely *variability*,
|
||||
*regularity* and *improvement potential* under different setup--conditions.
|
||||
Where Richter et al. used \acf{RBF}, we employed \acf{FFD} to set up a
|
||||
low--complexity parametrization of a more complex vertex--mesh.
|
||||
|
||||
In our findings we could show in the 1D--scenario that there were statistically
|
||||
significant very strong correlations between *variability and fitting error*
|
||||
($0.94$) and *improvement--potential and fitting error* ($1.0$) with
|
||||
*significant* *very strong* correlations between *variability and fitting error*
|
||||
($0.94$) and *improvement potential and fitting error* ($1.0$) with
|
||||
results comparable to those of Richter et al. (with $0.31$ to $0.88$
|
||||
for the former and $0.75$ to $0.99$ for the latter), whereas we found
|
||||
only weak correlations for *regularity and convergence--speed* ($0.28$)
|
||||
only *weak* correlations for *regularity and convergence--speed* ($0.28$)
|
||||
opposed to Richter et al. with $0.39$ to $0.91$.^[We only took statistically
|
||||
*significant* results into consideration when compiling these numbers. Details
|
||||
are given in the respective chapters.]
|
||||
|
||||
For the 3D--scenario our results show a very strong, significant correlation
|
||||
For the 3D--scenario our results show a *very strong*, *significant* correlation
|
||||
between *variability and fitting error* with $0.89$ to $0.94$, which are pretty
|
||||
much in line with the findings of Richter et al. ($0.65$ to $0.95$). The
|
||||
correlation between *improvement potential and fitting error* behave similar,
|
||||
@ -1410,10 +1412,7 @@ in \cite{anrichterEvol}, whereas we merely used an indirect \ac{FFD}--approach.
|
||||
As direct manipulations tend to perform better than indirect manipulations, the
|
||||
usage of \acf{DM--FFD} could also work better with the criteria we examined.
|
||||
This can also solve the problem of bad singular values for the *regularity* as
|
||||
the incorporation of the parametrization of the points on the surface, which are
|
||||
the essential part of a direct--manipulation, could cancel out a bad
|
||||
the incorporation of the parametrization of the points on the surface --- which
|
||||
are the essential part of a direct--manipulation --- could cancel out a bad
|
||||
control--grid as the bad control--points are never or negligibly used to
|
||||
parametrize those surface--points.
|
||||
|
||||
\improvement[inline]{Adjust the bibliography links: the DOI overrides the author's direct links.}
|
||||
|
BIN arbeit/ma.pdf
Binary file not shown.
503 arbeit/ma.tex
@ -197,20 +197,20 @@ the translation of the problem--domain into a simple parametric
|
||||
representation (the \emph{genome}) can be challenging.
|
||||
|
||||
This translation is often necessary as the target of the optimization
|
||||
may have too many degrees of freedom. In the example of an aerodynamic
|
||||
simulation of drag onto an object, those object--designs tend to have a
|
||||
high number of vertices to adhere to various requirements (visual,
|
||||
practical, physical, etc.). A simpler representation of the same object
|
||||
in only a few parameters that manipulate the whole in a sensible matter
|
||||
are desirable, as this often decreases the computation time
|
||||
significantly.
|
||||
may have too many degrees of freedom for a reasonable computation. In
|
||||
the example of an aerodynamic simulation of drag onto an object, those
|
||||
object--designs tend to have a high number of vertices to adhere to
|
||||
various requirements (visual, practical, physical, etc.). A simpler
|
||||
representation of the same object in only a few parameters that
|
||||
manipulate the whole in a sensible manner is desirable, as this often
|
||||
decreases the computation time significantly.
|
||||
|
||||
Additionally one can exploit the fact, that drag in this case is
|
||||
especially sensitive to non--smooth surfaces, so that a smooth local
|
||||
manipulation of the surface as a whole is more advantageous than merely
|
||||
random manipulation of the vertices.
|
||||
|
||||
The quality of such a low-dimensional representation in biological
|
||||
The quality of such a low--dimensional representation in biological
|
||||
evolution is strongly tied to the notion of
|
||||
\emph{evolvability}\cite{wagner1996complex}, as the parametrization of
|
||||
the problem has serious implications on the convergence speed and the
|
||||
@ -230,7 +230,7 @@ One example of such a general representation of an object is to generate
|
||||
random points and represent vertices of an object as distances to these
|
||||
points --- for example via \acf{RBF}. If one (or the algorithm) would
|
||||
move such a point the object will get deformed only locally (due to the
|
||||
\ac{RBF}). As this results in a simple mapping from the parameter-space
|
||||
\ac{RBF}). As this results in a simple mapping from the parameter--space
|
||||
onto the object one can try out different representations of the same
|
||||
object and evaluate which criteria may be suited to describe this notion
|
||||
of \emph{evolvability}. This is exactly what Richter et
|
||||
@ -238,18 +238,19 @@ al.\cite{anrichterEvol} have done.
|
||||
|
||||
As we transfer the results of Richter et al.\cite{anrichterEvol} from
|
||||
using \acf{RBF} as a representation to manipulate geometric objects to
|
||||
the use of \acf{FFD} we will use the same definition for evolvability
|
||||
the original author used, namely \emph{regularity}, \emph{variability},
|
||||
and \emph{improvement potential}. We introduce these term in detail in
|
||||
Chapter \ref{sec:intro:rvi}. In the original publication the author
|
||||
could show a correlation between these evolvability--criteria with the
|
||||
quality and convergence speed of such optimization.
|
||||
the use of \acf{FFD} we will use the same definition for
|
||||
\emph{evolvability} the original author used, namely \emph{regularity},
|
||||
\emph{variability}, and \emph{improvement potential}. We introduce these
|
||||
terms in detail in Chapter \ref{sec:intro:rvi}. In the original
|
||||
publication the author could show a correlation between these
|
||||
evolvability--criteria with the quality and convergence speed of such
|
||||
optimization.
|
||||
|
||||
We will replicate the same setup on the same objects but use \acf{FFD}
|
||||
instead of \acf{RBF} to create a local deformation near the control
|
||||
points and evaluate if the evolution--criteria still work as a predictor
|
||||
for \emph{evolvability} of the representation given the different
|
||||
deformation scheme, as suspected in \cite{anrichterEvol}.
|
||||
instead of \acf{RBF} to create a local deformation near the
|
||||
control--points and evaluate if the evolution--criteria still work as a
|
||||
predictor for \emph{evolvability} of the representation given the
|
||||
different deformation scheme, as suspected in \cite{anrichterEvol}.
|
||||
|
||||
First we introduce different topics in isolation in Chapter
|
||||
\ref{sec:back}. We take an abstract look at the definition of \ac{FFD}
|
||||
@ -258,7 +259,7 @@ is a sensible deformation function (in \ref{sec:back:ffdgood}). Then we
|
||||
establish some background--knowledge of evolutionary algorithms (in
|
||||
\ref{sec:back:evo}) and why this is useful in our domain (in
|
||||
\ref{sec:back:evogood}) followed by the definition of the different
|
||||
evolvability criteria established in \cite{anrichterEvol} (in
|
||||
evolvability--criteria established in \cite{anrichterEvol} (in
|
||||
\ref {sec:intro:rvi}).
|
||||
|
||||
In Chapter \ref{sec:impl} we take a look at our implementation of
|
||||
@ -285,18 +286,19 @@ from \cite{spitzmuller1996bezier} here and go into the extension to the
|
||||
|
||||
The main idea of \ac{FFD} is to create a function
|
||||
\(s : [0,1[^d \mapsto \mathbb{R}^d\) that spans a certain part of a
|
||||
vector--space and is only linearly parametrized by some special control
|
||||
points \(p_i\) and an constant attribution--function \(a_i(u)\), so \[
|
||||
vector--space and is only linearly parametrized by some special
|
||||
control--points \(p_i\) and a constant attribution--function
|
||||
\(a_i(u)\), so \[
|
||||
s(\vec{u}) = \sum_i a_i(\vec{u}) \vec{p_i}
|
||||
\] can be thought of as a representation of the inside of the convex hull
|
||||
generated by the control points where each point can be accessed by the
|
||||
right \(u \in [0,1[^d\).
|
||||
generated by the control--points where each position inside can be
|
||||
accessed by the right \(u \in [0,1[^d\).
|
||||
|
||||
\begin{figure}[!ht]
|
||||
\begin{center}
|
||||
\includegraphics[width=0.7\textwidth]{img/B-Splines.png}
|
||||
\end{center}
|
||||
\caption[Example of B-Splines]{Example of a parametrization of a line with
|
||||
\caption[Example of B--Splines]{Example of a parametrization of a line with
|
||||
corresponding deformation to generate a deformed object}
|
||||
\label{fig:bspline}
|
||||
\end{figure}
|
||||
@ -338,7 +340,8 @@ We can even derive this equation straightforward for an arbitrary
|
||||
\[\frac{\partial}{\partial u} N_{i,d,r}(u) = \frac{d}{\tau_{i+d} - \tau_i} N_{i,d-1,\tau}(u) - \frac{d}{\tau_{i+d+1} - \tau_{i+1}} N_{i+1,d-1,\tau}(u)\]
|
||||
|
||||
For a B--Spline \[s(u) = \sum_{i} N_{i,d,\tau_i}(u) p_i\] these
|
||||
derivations yield \(\frac{\partial^d}{\partial u} s(u) = 0\).
|
||||
derivations yield
|
||||
\(\left(\frac{\partial}{\partial u}\right)^d s(u) = 0\).
|
||||
|
||||
Another interesting property of these recursive polynomials is that they
|
||||
are continuous (given \(d \ge 1\)) as every \(p_i\) gets blended in
|
||||
@ -348,17 +351,17 @@ step of the recursion.
|
||||
|
||||
This means that all changes are only a local linear combination between
|
||||
the control--point \(p_i\) to \(p_{i+d+1}\) and consequently this leads
|
||||
to the convex--hull--property of B-Splines --- meaning, that no matter
|
||||
to the convex--hull--property of B--Splines --- meaning, that no matter
|
||||
how we choose our coefficients, the resulting points all have to lie
|
||||
inside the convex--hull of the control--points.
|
||||
|
||||
For a given point \(v_i\) we can then calculate the contributions
|
||||
\(n_{i,j}~:=~N_{j,d,\tau}\) of each control point \(p_j\) to get the
|
||||
For a given point \(s_i\) we can then calculate the contributions
|
||||
\(u_{i,j}~:=~N_{j,d,\tau}\) of each control point \(p_j\) to get the
|
||||
projection from the control--point--space into the object--space: \[
|
||||
v_i = \sum_j n_{i,j} \cdot p_j = \vec{n}_i^{T} \vec{p}
|
||||
s_i = \sum_j u_{i,j} \cdot p_j = \vec{n}_i^{T} \vec{p}
|
||||
\] or written for all points at the same time: \[
|
||||
\vec{v} = \vec{N} \vec{p}
|
||||
\] where \(\vec{N}\) is the \(n \times m\) transformation--matrix (later
|
||||
\vec{s} = \vec{U} \vec{p}
|
||||
\] where \(\vec{U}\) is the \(n \times m\) transformation--matrix (later
|
||||
on called \textbf{deformation matrix}) for \(n\) object--space--points
|
||||
and \(m\) control--points.
|
||||
|
||||
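To make the assembly of this deformation matrix concrete, the following is a minimal sketch (purely illustrative, not the implementation used in this work) that evaluates the Cox--de Boor recursion on a clamped knot--vector and fills \(\vec{U}\) row by row; the helper names and the use of numpy are assumptions.

```python
import numpy as np

def basis(i, d, u, tau):
    """Cox--de Boor recursion for the B--Spline basis function N_{i,d,tau}(u)."""
    if d == 0:
        return 1.0 if tau[i] <= u < tau[i + 1] else 0.0
    left = right = 0.0
    if tau[i + d] != tau[i]:
        left = (u - tau[i]) / (tau[i + d] - tau[i]) * basis(i, d - 1, u, tau)
    if tau[i + d + 1] != tau[i + 1]:
        right = ((tau[i + d + 1] - u) / (tau[i + d + 1] - tau[i + 1])
                 * basis(i + 1, d - 1, u, tau))
    return left + right

def deformation_matrix(us, m, d=2):
    """n x m matrix U with U[i, j] = N_{j,d,tau}(u_i).

    The clamped knot--vector repeats the border knots d+1 times, so the
    partition of unity also holds at the borders."""
    tau = np.concatenate([np.zeros(d), np.linspace(0.0, 1.0, m - d + 1), np.ones(d)])
    return np.array([[basis(j, d, u, tau) for j in range(m)] for u in us])

# usage: s = U @ p maps the m control--points p onto the n object--space points
# U = deformation_matrix(us=np.linspace(0.0, 0.999, 50), m=7)
```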
@ -372,7 +375,7 @@ of the B--spline ($[k_0,k_4]$ on this figure), the B--Spline basis functions sum
|
||||
up to one (partition of unity). In this example, we use B--Splines of degree 2.
|
||||
The horizontal segment below the abscissa axis represents the domain of
|
||||
influence of the B--splines basis function, i.e. the interval on which they are
|
||||
not null. At a given point, there are at most $ d+1$ non-zero B--Spline basis
|
||||
not null. At a given point, there are at most $ d+1$ non--zero B--Spline basis
|
||||
functions (compact support).\grqq \newline
|
||||
Note that Brunet starts his index at $-d$ opposed to our definition, where we
|
||||
start at $0$.}
|
||||
@ -381,8 +384,8 @@ start at $0$.}
|
||||
|
||||
Furthermore B--Spline--basis--functions form a partition of unity for
|
||||
all, but the first and last \(d\)
|
||||
control-points\cite{brunet2010contributions}. Therefore we later on use
|
||||
the border-points \(d+1\) times, such that \(\sum_j n_{i,j} p_j = p_i\)
|
||||
control--points\cite{brunet2010contributions}. Therefore we later on use
|
||||
the border--points \(d+1\) times, such that \(\sum_j u_{i,j} p_j = p_i\)
|
||||
for these points.
|
||||
|
||||
The locality of the influence of each control--point and the partition
|
||||
@ -395,13 +398,13 @@ function?}{Why is a good deformation function?}}\label{why-is-a-good-deformatio
|
||||
\label{sec:back:ffdgood}
|
||||
|
||||
The usage of \ac{FFD} as a tool for manipulating follows directly from
|
||||
the properties of the polynomials and the correspondence to the control
|
||||
points. Having only a few control points gives the user a nicer
|
||||
high--level--interface, as she only needs to move these points and the
|
||||
model follows in an intuitive manner. The deformation is smooth as the
|
||||
underlying polygon is smooth as well and affects as many vertices of the
|
||||
model as needed. Moreover the changes are always local so one risks not
|
||||
any change that a user cannot immediately see.
|
||||
the properties of the polynomials and the correspondence to the
|
||||
control--points. Having only a few control--points gives the user a
|
||||
nicer high--level--interface, as she only needs to move these points and
|
||||
the model follows in an intuitive manner. The deformation is smooth as
|
||||
the underlying polynomial is smooth as well and affects as many vertices of
|
||||
the model as needed. Moreover the changes are always local, so the user
does not risk any change that they cannot immediately see.
|
||||
|
||||
But there are also disadvantages of this approach. The user loses the
|
||||
ability to directly influence vertices and even seemingly simple tasks
|
||||
@ -479,7 +482,7 @@ through our \emph{fitness--function}, biologically by the ability to
|
||||
survive and produce offspring). Any individual in our algorithm thus
|
||||
experiences a biologically motivated life cycle of inheriting genes from
|
||||
the parents, modified by mutations occurring, performing according to a
|
||||
fitness--metric and generating offspring based on this. Therefore each
|
||||
fitness--metric, and generating offspring based on this. Therefore each
|
||||
iteration in the while--loop above is also often named generation.
|
||||
|
||||
One should note that there is a subtle difference between
|
||||
@ -517,8 +520,8 @@ The main algorithm just repeats the following steps:
|
||||
\(s : (I^\lambda \cup I^{\mu + \lambda},\Phi) \mapsto I^\mu\) that
|
||||
selects from the previously generated \(I^\lambda\) children and
|
||||
optionally also the parents (denoted by the set \(Q\) in the
|
||||
algorithm) using the fitness--function \(\Phi\). The result of this
|
||||
operation is the next Population of \(\mu\) individuals.
|
||||
algorithm) using the \emph{fitness--function} \(\Phi\). The result of
|
||||
this operation is the next population of \(\mu\) individuals (a
minimal sketch of one such generation step follows this list).
|
||||
\end{itemize}
|
||||
|
||||
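As a generic illustration of this loop --- not the concrete operators used later in this work --- one generation of a \((\mu + \lambda)\)--strategy could look like the following sketch; the Gaussian mutation and the toy fitness--function are placeholders.

```python
import numpy as np

rng = np.random.default_rng(0)

def generation(parents, fitness, lam, sigma=0.1, plus_selection=True):
    """One iteration: create lambda children by mutation, keep the mu best.

    plus_selection=True lets the parents compete with their children
    ((mu+lambda)--selection), otherwise only the children survive
    ((mu,lambda)--selection)."""
    mu, dim = parents.shape
    children = [parents[rng.integers(mu)] + rng.normal(0.0, sigma, dim)
                for _ in range(lam)]
    pool = np.vstack([parents] + children) if plus_selection else np.vstack(children)
    ranking = np.argsort([fitness(ind) for ind in pool])   # minimization
    return pool[ranking[:mu]]

# toy usage with the sphere model as fitness--function:
# pop = rng.normal(size=(10, 5))
# for _ in range(100):
#     pop = generation(pop, lambda p: float(np.sum(p**2)), lam=40)
```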
All these functions can (and mostly do) have a lot of hidden parameters
|
||||
@ -547,10 +550,10 @@ algorithms}\label{advantages-of-evolutionary-algorithms}
|
||||
|
||||
The main advantage of evolutionary algorithms is the ability to find
|
||||
optima of general functions just with the help of a given
|
||||
fitness--function. Components and techniques for evolutionary algorithms
|
||||
are specifically known to help with different problems arising in the
|
||||
domain of optimization\cite{weise2012evolutionary}. An overview of the
|
||||
typical problems are shown in figure \ref{fig:probhard}.
|
||||
\emph{fitness--function}. Components and techniques for evolutionary
|
||||
algorithms are specifically known to help with different problems
|
||||
arising in the domain of optimization\cite{weise2012evolutionary}. An
|
||||
overview of the typical problems is shown in figure \ref{fig:probhard}.
|
||||
|
||||
\begin{figure}[!ht]
|
||||
\includegraphics[width=\textwidth]{img/weise_fig3.png}
|
||||
@ -559,13 +562,14 @@ typical problems are shown in figure \ref{fig:probhard}.
|
||||
\end{figure}
|
||||
|
||||
Most of the advantages stem from the fact that a gradient--based
|
||||
procedure has only one point of observation from where it evaluates the
|
||||
next steps, whereas an evolutionary strategy starts with a population of
|
||||
guessed solutions. Because an evolutionary strategy can be modified
|
||||
according to the problem--domain (i.e.~by the ideas given above) it can
|
||||
also approximate very difficult problems in an efficient manner and even
|
||||
self--tune parameters depending on the ancestry at runtime\footnote{Some
|
||||
examples of this are explained in detail in \cite{eiben1999parameter}}.
|
||||
procedure has usually only one point of observation from where it
|
||||
evaluates the next steps, whereas an evolutionary strategy starts with a
|
||||
population of guessed solutions. Because an evolutionary strategy can be
|
||||
modified according to the problem--domain (i.e.~by the ideas given
|
||||
above) it can also approximate very difficult problems in an efficient
|
||||
manner and even self--tune parameters depending on the ancestry at
|
||||
runtime\footnote{Some examples of this are explained in detail in
|
||||
\cite{eiben1999parameter}}.
|
||||
|
||||
If an analytic best solution exists and is easily computable
|
||||
(i.e.~because the error--function is convex) an evolutionary algorithm
|
||||
@ -599,8 +603,8 @@ deformation.
|
||||
We can also think of the deformation in terms of differences from the
|
||||
original coordinates \[
|
||||
\Delta \vec{S} = \vec{U} \cdot \Delta \vec{P}
|
||||
\] which is isomorphic to the former due to the linear correlation in
|
||||
the deformation. One can see in this way, that the way the deformation
|
||||
\] which is isomorphic to the former due to the linearity of the
|
||||
deformation. One can thus see that the way the deformation
|
||||
behaves lies solely in the entries of \(\vec{U}\), which is why the
|
||||
three criteria focus on this.
|
||||
|
||||
@ -609,14 +613,14 @@ three criteria focus on this.
|
||||
In \cite{anrichterEvol} \emph{variability} is defined as
|
||||
\[\mathrm{variability}(\vec{U}) := \frac{\mathrm{rank}(\vec{U})}{n},\]
|
||||
whereby \(\vec{U}\) is the \(n \times m\) deformation--Matrix used to
|
||||
map the \(m\) control points onto the \(n\) vertices.
|
||||
map the \(m\) control--points onto the \(n\) vertices.
|
||||
|
||||
Given \(n = m\), an identical number of control--points and vertices,
|
||||
this quotient will be \(=1\) if all control points are independent of
|
||||
this quotient will be \(=1\) if all control--points are independent of
|
||||
each other and the solution is to trivially move every control--point
|
||||
onto a target--point.
|
||||
|
||||
In praxis the value of \(V(\vec{U})\) is typically \(\ll 1\), because as
|
||||
In practice the value of \(V(\vec{U})\) is typically \(\ll 1\), because
|
||||
there are only a few control--points for many vertices, so \(m \ll n\).
|
||||
|
||||
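Written out, the criterion is a one--liner; the following sketch (assuming numpy and a dense deformation--matrix) is only meant to illustrate the definition.

```python
import numpy as np

def variability(U):
    """variability(U) = rank(U) / n for the n x m deformation--matrix U."""
    n, _m = U.shape
    return np.linalg.matrix_rank(U) / n
```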
This criterion should correlate to the degrees of freedom the given
|
||||
@ -641,7 +645,7 @@ As we deform the given Object only based on the parameters as
|
||||
\(\vec{p} \mapsto f(\vec{x} + \vec{U}\vec{p})\) this makes sure that
|
||||
\(\|\vec{Up}\| \propto \|\vec{p}\|\) when \(\kappa(\vec{U}) \approx 1\).
|
||||
The inversion of \(\kappa(\vec{U})\) is only performed to map the
|
||||
criterion--range to \([0..1]\), whereas \(1\) is the optimal value and
|
||||
criterion--range to \([0..1]\), where \(1\) is the optimal value and
|
||||
\(0\) is the worst value.
|
||||
|
||||
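Illustrative sketch of this criterion via the singular values of \(\vec{U}\) (assuming numpy; not the implementation used in this work):

```python
import numpy as np

def regularity(U):
    """regularity(U) = 1 / kappa(U) = sigma_min / sigma_max of U.

    A value near 1 means a well--conditioned deformation--matrix; a zero
    column (non--contributing control--point) forces the value to 0."""
    sigma = np.linalg.svd(U, compute_uv=False)   # descending singular values
    return sigma[-1] / sigma[0]
```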
On the one hand this criterion should be characteristic for numeric
|
||||
@ -653,8 +657,8 @@ locality\cite{weise2012evolutionary,thorhauer2014locality}.
|
||||
\subsection{Improvement Potential}\label{improvement-potential}
|
||||
|
||||
In contrast to the general nature of \emph{variability} and
|
||||
\emph{regularity}, which are agnostic of the fitness--function at hand,
|
||||
the third criterion should reflect a notion of the potential for
|
||||
\emph{regularity}, which are agnostic of the \emph{fitness--function} at
|
||||
hand, the third criterion should reflect a notion of the potential for
|
||||
optimization, taking a guess into account.
|
||||
|
||||
Most of the times some kind of gradient \(g\) is available to suggest a
|
||||
@ -698,9 +702,9 @@ As we have established in Chapter \ref{sec:back:ffd} we can define an
|
||||
\Delta_x(u) = \sum_i N_{i,d,\tau_i}(u) \Delta_x c_i
|
||||
\end{equation}
|
||||
|
||||
Note that we only sum up the \(\Delta\)--displacements in the control
|
||||
points \(c_i\) to get the change in position of the point we are
|
||||
interested in.
|
||||
Note that we only sum up the \(\Delta\)--displacements in the
|
||||
control--points \(c_i\) to get the change in position of the point we
|
||||
are interested in.
|
||||
|
||||
In this way every deformed vertex is defined by \[
|
||||
\textrm{Deform}(v_x) = v_x + \Delta_x(u)
|
||||
@ -722,8 +726,8 @@ v_x \overset{!}{=} \sum_i N_{i,d,\tau_i}(u) c_i
|
||||
|
||||
For this we employ the Gauss--Newton algorithm\cite{gaussNewton}, which
|
||||
converges into the least--squares solution. An exact solution of this
|
||||
problem is impossible most of the times, because we usually have way
|
||||
more vertices than control points (\(\#v~\gg~\#c\)).
|
||||
problem is impossible most of the time, because we usually have far more
|
||||
vertices than control--points (\(\#v~\gg~\#c\)).
|
||||
|
||||
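The following sketch shows this parametrization step for a single vertex, using scipy's \texttt{least\_squares} as a stand--in for a hand--rolled Gauss--Newton loop; the control--points, the degree and the helper name are made up for illustration and do not reflect the actual implementation.

```python
import numpy as np
from scipy.interpolate import BSpline
from scipy.optimize import least_squares

d = 2                                           # B--Spline degree
c = np.array([[0., 0.], [1., 2.], [2., -1.],    # made--up control--points (m x 2)
              [3., 1.], [4., 0.]])
m = len(c)
# clamped knot--vector: border knots repeated d+1 times
tau = np.concatenate([np.zeros(d), np.linspace(0.0, 1.0, m - d + 1), np.ones(d)])
curve = BSpline(tau, c, d)

def parametrize(v, u0=0.5):
    """argmin_u || v - sum_i N_{i,d,tau}(u) c_i ||^2 for a single vertex v."""
    res = least_squares(lambda u: curve(u[0]) - v, x0=[u0], bounds=(0.0, 1.0))
    return res.x[0]

# u = parametrize(np.array([1.7, 0.4]))
```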
\section{\texorpdfstring{Adaption of \ac{FFD} for a
|
||||
3D--Mesh}{Adaption of for a 3D--Mesh}}\label{adaption-of-for-a-3dmesh}
|
||||
@ -735,8 +739,8 @@ last chapter. But this time things get a bit more complicated. As we
|
||||
have a 3--dimensional grid we may have a different amount of
|
||||
control--points in each direction.
|
||||
|
||||
Given \(n,m,o\) control points in \(x,y,z\)--direction each Point on the
|
||||
curve is defined by
|
||||
Given \(n,m,o\) control--points in \(x,y,z\)--direction each point on
|
||||
the curve is defined by
|
||||
\[V(u,v,w) = \sum_i \sum_j \sum_k N_{i,d,\tau_i}(u) N_{j,d,\tau_j}(v) N_{k,d,\tau_k}(w) \cdot C_{ijk}.\]
|
||||
|
||||
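Assuming the three 1D basis--value vectors at \((u,v,w)\) have already been computed (e.g. with a basis--evaluation helper as sketched in chapter \ref{sec:back:ffd}), the triple sum collapses into one tensor contraction; this is an illustration only.

```python
import numpy as np

def trivariate_point(Nu, Nv, Nw, C):
    """V(u,v,w) = sum_{ijk} N_i(u) N_j(v) N_k(w) * C_ijk.

    Nu, Nv, Nw hold the 1D basis values (lengths n, m, o) and
    C is the (n, m, o, 3) array of control--points."""
    return np.einsum('i,j,k,ijkd->d', Nu, Nv, Nw, C)
```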
In this case we have three different B--Splines (one for each dimension)
|
||||
@ -814,14 +818,14 @@ behaviour of the evolutionary algorithm.
|
||||
|
||||
As mentioned in chapter \ref{sec:back:evo}, the way of choosing the
|
||||
representation to map the general problem (mesh--fitting/optimization in
|
||||
our case) into a parameter-space is very important for the quality and
|
||||
our case) into a parameter--space is very important for the quality and
|
||||
runtime of evolutionary algorithms\cite{Rothlauf2006}.
|
||||
|
||||
Because our control--points are arranged in a grid, we can accurately
|
||||
represent each vertex--point inside the grid's volume with proper
|
||||
B--Spline--coefficients between \([0,1[\) and --- as a consequence ---
|
||||
we have to embed our object into it (or create constant ``dummy''-points
|
||||
outside).
|
||||
we have to embed our object into it (or create constant
|
||||
``dummy''--points outside).
|
||||
|
||||
The great advantage of B--Splines is the local, direct impact of each
|
||||
control point without having a \(1:1\)--correlation, and a smooth
|
||||
@ -844,18 +848,18 @@ One would normally think, that the more control--points you add, the
|
||||
better the result will be, but this is not the case for our B--Splines.
|
||||
Given any point \(\vec{p}\) only the \(2 \cdot (d-1)\) control--points
|
||||
contribute to the parametrization of that point\footnote{Normally these
|
||||
are \(d-1\) to each side, but at the boundaries the number gets
|
||||
increased to the inside to meet the required smoothness}. This means,
|
||||
that a high resolution can have many control-points that are not
|
||||
are \(d-1\) to each side, but at the boundaries border points get used
|
||||
multiple times to meet the number of points required}. This means,
|
||||
that a high resolution can have many control--points that are not
|
||||
contributing to any point on the surface and are thus completely
|
||||
irrelevant to the solution.
|
||||
|
||||
We illustrate this phenomenon in figure \ref{fig:enoughCP}, where the
|
||||
four red central points are not relevant for the parametrization of the
|
||||
red central points are not relevant for the parametrization of the
|
||||
circle. This leads to artefacts in the deformation--matrix \(\vec{U}\),
|
||||
as the columns corresponding to those control--points are \(0\).
|
||||
|
||||
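Such non--contributing control--points can be detected directly from the (numerically) zero columns of \(\vec{U}\); a small illustrative sketch, assuming numpy:

```python
import numpy as np

def contributing(U, eps=1e-12):
    """Boolean mask over the control--points: True where the corresponding
    column of U is not (numerically) zero, i.e. the control--point actually
    parametrizes at least one surface--point."""
    return np.abs(U).sum(axis=0) > eps

# number of effectively used control--points:
# used = int(contributing(U).sum())
```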
This leads to useless increased complexity, as the parameters
|
||||
This also leads to needlessly increased complexity, as the parameters
|
||||
corresponding to those points will never have any effect, but a naive
|
||||
algorithm will still try to optimize them yielding numeric artefacts in
|
||||
the best and non--terminating or ill--defined solutions\footnote{One
|
||||
@ -869,22 +873,23 @@ in the first place. We will address this in a special scenario in
|
||||
\ref{sec:res:3d:var}.
|
||||
|
||||
For our tests we chose different uniformly sized grids and added noise
|
||||
onto each control-point\footnote{For the special case of the outer layer
|
||||
we only applied noise away from the object, so the object is still
|
||||
confined in the convex hull of the control--points.} to simulate
|
||||
different starting-conditions.
|
||||
onto each control--point\footnote{For the special case of the outer
|
||||
layer we only applied noise away from the object, so the object is
|
||||
still confined in the convex hull of the control--points.} to simulate
|
||||
different starting--conditions.
|
||||
|
||||
\chapter{\texorpdfstring{Scenarios for testing evolvability criteria
|
||||
\chapter{\texorpdfstring{Scenarios for testing evolvability--criteria
|
||||
using
|
||||
\ac{FFD}}{Scenarios for testing evolvability criteria using }}\label{scenarios-for-testing-evolvability-criteria-using}
|
||||
\ac{FFD}}{Scenarios for testing evolvability--criteria using }}\label{scenarios-for-testing-evolvabilitycriteria-using}
|
||||
|
||||
\label{sec:eval}
|
||||
|
||||
In our experiments we use the same two testing--scenarios, that were
|
||||
also used by \cite{anrichterEvol}. The first scenario deforms a plane
|
||||
into a shape originally defined in \cite{giannelli2012thb}, where we
|
||||
setup control-points in a 2--dimensional manner and merely deform in the
|
||||
height--coordinate to get the resulting shape.
|
||||
also used by Richter et al.\cite{anrichterEvol} The first scenario
|
||||
deforms a plane into a shape originally defined by Giannelli et
|
||||
al.\cite{giannelli2012thb}, where we set up control--points in a
|
||||
2--dimensional manner and merely deform in the height--coordinate to get
|
||||
the resulting shape.
|
||||
|
||||
In the second scenario we increase the degrees of freedom significantly
|
||||
by using a 3--dimensional control--grid to deform a sphere into a face,
|
||||
@ -922,7 +927,7 @@ including a wireframe--overlay of the vertices.}
|
||||
\label{fig:1dtarget}
|
||||
\end{figure}
|
||||
|
||||
As the starting-plane we used the same shape, but set all
|
||||
As the starting--plane we used the same shape, but set all
|
||||
\(z\)--coordinates to \(0\), yielding a flat plane, which is partially
|
||||
already correct.
|
||||
|
||||
@ -936,11 +941,11 @@ corresponding vertex
|
||||
|
||||
where \(t_i\) are the respective target--vertices to the parametrized
|
||||
source--vertices\footnote{The parametrization is encoded in \(\vec{U}\)
|
||||
and the initial position of the control points. See
|
||||
and the initial position of the control--points. See
|
||||
\ref{sec:ffd:adapt}} with the current deformation--parameters
|
||||
\(\vec{p} = (p_1,\dots, p_m)\). We can use this
|
||||
one--to--one--correspondence because we have exactly the same number of
|
||||
source and target-vertices do to our setup of just flattening the
|
||||
source and target--vertices due to our setup of just flattening the
|
||||
object.
|
||||
|
||||
This formula is also the least--squares approximation error for which we
|
||||
@ -975,16 +980,16 @@ Both of these Models can be seen in figure \ref{fig:3dtarget}.
|
||||
Opposed to the 1D--case we cannot map the source and target--vertices in
|
||||
a one--to--one--correspondence, which we especially need for the
|
||||
approximation of the fitting--error. Hence we state that the error of
|
||||
one vertex is the distance to the closest vertex of the other model and
|
||||
sum up the error from the respective source and target.
|
||||
one vertex is the distance to the closest vertex of the respective other
|
||||
model and sum up the error from the source and target.
|
||||
|
||||
We therefore define the \emph{fitness--function} to be:
|
||||
|
||||
\begin{equation}
|
||||
\mathrm{f}(\vec{P}) = \frac{1}{n} \underbrace{\sum_{i=1}^n \|\vec{c_T(s_i)} -
|
||||
\vec{s_i}\|_2^2}_{\textrm{source-to-target--distance}}
|
||||
\vec{s_i}\|_2^2}_{\textrm{source--to--target--distance}}
|
||||
+ \frac{1}{m} \underbrace{\sum_{i=1}^m \|\vec{c_S(t_i)} -
|
||||
\vec{t_i}\|_2^2}_{\textrm{target-to-source--distance}}
|
||||
\vec{t_i}\|_2^2}_{\textrm{target--to--source--distance}}
|
||||
+ \lambda \cdot \textrm{regularization}(\vec{P})
|
||||
\label{eq:fit3d}
|
||||
\end{equation}
|
||||
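A minimal sketch of the two distance terms of equation \ref{eq:fit3d}; the regularization term is only stubbed out here, and the closest--vertex lookup via scipy's \texttt{cKDTree} is an assumption made for illustration, not a statement about the actual implementation.

```python
import numpy as np
from scipy.spatial import cKDTree

def fitness(S, T, lam=0.0, regularization=lambda P: 0.0, P=None):
    """Source--to--target plus target--to--source mean squared
    nearest--neighbour distances, plus a (stubbed) regularization term."""
    d_st, _ = cKDTree(T).query(S)   # distance of every s_i to its closest t
    d_ts, _ = cKDTree(S).query(T)   # distance of every t_i to its closest s
    return float(np.mean(d_st**2) + np.mean(d_ts**2)) + lam * regularization(P)

# f = fitness(deformed_vertices, target_vertices)
```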
@ -1002,7 +1007,7 @@ calculated coefficients for the \ac{FFD} --- analog to the 1D case ---
|
||||
and finally \(\vec{P}\) being the \(m \times 3\)--matrix of the
|
||||
control--grid defining the whole deformation.
|
||||
|
||||
As regularization-term we add a weighted Laplacian of the deformation
|
||||
As regularization--term we add a weighted Laplacian of the deformation
|
||||
that has been used before by Aschenbach et
|
||||
al.\cite[Section 3.2]{aschenbach2015} on similar models and was shown to
|
||||
lead to a more precise fit. The Laplacian
|
||||
@ -1034,7 +1039,7 @@ To compare our results to the ones given by Richter et
|
||||
al.\cite{anrichterEvol}, we also use Spearman's rank correlation
|
||||
coefficient. Opposed to other popular coefficients, like the Pearson
|
||||
correlation coefficient, which measures a linear relationship between
|
||||
variables, the Spearmans's coefficient assesses \glqq how well an
|
||||
variables, the Spearman's coefficient assesses \glqq how well an
|
||||
arbitrary monotonic function can describe the relationship between two
|
||||
variables, without making any assumptions about the frequency
|
||||
distribution of the variables\grqq\cite{hauke2011comparison}.
|
||||
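Computing the coefficient and its p--value is a one--liner with scipy; the numbers below are made--up placeholders and not measurements from this work.

```python
import numpy as np
from scipy.stats import spearmanr

criterion = np.array([0.12, 0.25, 0.31, 0.44, 0.58])   # e.g. variability per run
outcome   = np.array([420., 310., 305., 220., 150.])   # e.g. fitting error per run

r_s, p = spearmanr(criterion, outcome)
print(-r_s, p)   # the tables in this work list the negated coefficient and p--value
```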
@ -1072,18 +1077,19 @@ Approximation}\label{procedure-1d-function-approximation}
|
||||
\label{sec:proc:1d}
|
||||
|
||||
For our setup we first compute the coefficients of the
|
||||
deformation--matrix and use then the formulas for \emph{variability} and
|
||||
deformation--matrix and use the formulas for \emph{variability} and
|
||||
\emph{regularity} to get our predictions. Afterwards we solve the
|
||||
problem analytically to get the (normalized) correct gradient that we
|
||||
use as guess for the \emph{improvement potential}. To check we also
|
||||
consider a distorted gradient \(\vec{g}_{\mathrm{d}}\) \[
|
||||
use as a guess for the \emph{improvement potential}. To further test the
|
||||
\emph{improvement potential} we also consider a distorted gradient
|
||||
\(\vec{g}_{\mathrm{d}}\): \[
|
||||
\vec{g}_{\mathrm{d}} = \frac{\mu \vec{g}_{\mathrm{c}} + (1-\mu)\mathbb{1}}{\|\mu \vec{g}_{\mathrm{c}} + (1-\mu) \mathbb{1}\|}
|
||||
\] where \(\mathbb{1}\) is the vector consisting of \(1\) in every
|
||||
dimension, \(\vec{g}_\mathrm{c} = \vec{p^{*}} - \vec{p}\) is the
|
||||
calculated correct gradient, and \(\mu\) is used to blend between
|
||||
\(\vec{g}_\mathrm{c}\) and \(\mathbb{1}\). As we always start with a
|
||||
gradient of \(p = \mathbb{0}\) this means shortens
|
||||
\(\vec{g}_\mathrm{c} = \vec{p^{*}}\).
|
||||
gradient of \(p = \mathbb{0}\) this means we can shorten the definition
|
||||
of \(\vec{g}_\mathrm{c}\) to \(\vec{g}_\mathrm{c} = \vec{p^{*}}\).
|
||||
|
||||
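The blending of the correct gradient with the all--ones vector translates directly into code; an illustrative sketch, assuming numpy:

```python
import numpy as np

def distorted_gradient(g_c, mu):
    """g_d = (mu*g_c + (1-mu)*1) / ||mu*g_c + (1-mu)*1||."""
    blend = mu * g_c + (1.0 - mu) * np.ones_like(g_c)
    return blend / np.linalg.norm(blend)

# mu = 1 reproduces the (normalized) correct gradient, mu = 0 the all--ones vector
```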
\begin{figure}[ht]
|
||||
\begin{center}
|
||||
@ -1096,10 +1102,10 @@ random distortion to generate a testcase.}
|
||||
|
||||
We then set up a regular 2--dimensional grid around the object with the
|
||||
desired grid resolutions. To generate a testcase we then move the
|
||||
grid--vertices randomly inside the x--y--plane. As self-intersecting
|
||||
grids get tricky to solve with our implemented newtons--method we avoid
|
||||
the generation of such self--intersecting grids for our testcases (see
|
||||
section \ref{3dffd}).
|
||||
grid--vertices randomly inside the x--y--plane. As self--intersecting
|
||||
grids get tricky to solve with our implemented Newton's method (see
|
||||
section \ref{3dffd}) we avoid the generation of such self--intersecting
|
||||
grids for our testcases.
|
||||
|
||||
To achieve that we generated a Gaussian--distributed number with
|
||||
\(\mu = 0, \sigma=0.25\) and clamped it to the range \([-0.25,0.25]\).
|
||||
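A sketch of this noise generation (assuming numpy; the shape is illustrative):

```python
import numpy as np

rng = np.random.default_rng()

def grid_offsets(shape):
    """Gaussian offsets (mu=0, sigma=0.25) clamped to [-0.25, 0.25]."""
    return np.clip(rng.normal(0.0, 0.25, shape), -0.25, 0.25)

# offsets = grid_offsets((rows, cols, 2))   # x/y offset for every grid--vertex
```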
@ -1130,12 +1136,12 @@ In the case of our 1D--Optimization--problem, we have the luxury of
|
||||
knowing the analytical solution to the given problem--set. We use this
|
||||
to experimentally evaluate the quality criteria we introduced before. As
|
||||
an evolutionary optimization is partially a random process, we use the
|
||||
analytical solution as a stopping-criteria. We measure the convergence
|
||||
analytical solution as a stopping--criterion. We measure the convergence
|
||||
speed as the number of iterations the evolutionary algorithm needed to get
|
||||
within \(1.05 \times\) of the optimal solution.
|
||||
|
||||
We used different regular grids that we manipulated as explained in
|
||||
Section \ref{sec:proc:1d} with a different number of control points. As
|
||||
Section \ref{sec:proc:1d} with a different number of control--points. As
|
||||
our grids have to be the product of two integers, we compared a
|
||||
\(5 \times 5\)--grid with \(25\) control--points to a \(4 \times 7\) and
|
||||
\(7 \times 4\)--grid with \(28\) control--points. This was done to
|
||||
@ -1157,7 +1163,7 @@ Note that $7 \times 4$ and $4 \times 7$ have the same number of control--points.
|
||||
\label{fig:1dvar}
|
||||
\end{figure}
|
||||
|
||||
Variability should characterize the potential for design space
|
||||
\emph{Variability} should characterize the potential for design space
|
||||
exploration and is defined in terms of the normalized rank of the
|
||||
deformation matrix \(\vec{U}\):
|
||||
\(V(\vec{U}) := \frac{\textrm{rank}(\vec{U})}{n}\), whereby \(n\) is the
|
||||
@ -1166,29 +1172,30 @@ number of vertices. As all our tested matrices had a constant rank
|
||||
plotted the errors in the box plot in figure \ref{fig:1dvar}.
|
||||
|
||||
It is also noticeable, that although the \(7 \times 4\) and
|
||||
\(4 \times 7\) grids have a higher variability, they perform not better
|
||||
than the \(5 \times 5\) grid. Also the \(7 \times 4\) and \(4 \times 7\)
|
||||
grids differ distinctly from each other with a mean\(\pm\)sigma of
|
||||
\(233.09 \pm 12.32\) for the former and \(286.32 \pm 22.36\) for the
|
||||
latter, although they have the same number of control--points. This is
|
||||
an indication of an impact a proper or improper grid--setup can have. We
|
||||
do not draw scientific conclusions from these findings, as more research
|
||||
on non-squared grids seem necessary.
|
||||
\(4 \times 7\) grids have a higher \emph{variability}, they do not perform
|
||||
better than the \(5 \times 5\) grid. Also the \(7 \times 4\) and
|
||||
\(4 \times 7\) grids differ distinctly from each other with a
|
||||
mean\(\pm\)sigma of \(233.09 \pm 12.32\) for the former and
|
||||
\(286.32 \pm 22.36\) for the latter, although they have the same number
|
||||
of control--points. This is an indication of an impact a proper or
|
||||
improper grid--setup can have. We do not draw scientific conclusions
|
||||
from these findings, as more research on non--squared grids seems
|
||||
necessary.
|
||||
|
||||
Leaving the issue of the grid--layout aside we focused on grids having
|
||||
the same number of prototypes in every dimension. For the
|
||||
\(5 \times 5\), \(7 \times 7\) and \(10 \times 10\) grids we found a
|
||||
\emph{very strong} correlation (\(-r_S = 0.94, p = 0\)) between the
|
||||
variability and the evolutionary error.
|
||||
\emph{variability} and the evolutionary error.
|
||||
|
||||
\subsection{Regularity}\label{regularity-1}
|
||||
|
||||
\begin{figure}[tbh]
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{img/evolution1d/55_to_1010_steps.png}
|
||||
\caption[Improvement potential and regularity vs. steps]{\newline
|
||||
Left: Improvement potential against steps until convergence\newline
|
||||
Right: Regularity against steps until convergence\newline
|
||||
\caption[Improvement potential and regularity against iterations]{\newline
|
||||
Left: *Improvement potential* against number of iterations until convergence\newline
|
||||
Right: *Regularity* against number of iterations until convergence\newline
|
||||
Coloured by their grid--resolution, both with a linear fit over the whole
|
||||
dataset.}
|
||||
\label{fig:1dreg}
|
||||
@ -1201,16 +1208,16 @@ $5 \times 5$ & $7 \times 4$ & $4 \times 7$ & $7 \times 7$ & $10 \times 10$\\
|
||||
\hline
|
||||
$0.28$ ($0.0045$) & \textcolor{red}{$0.21$} ($0.0396$) & \textcolor{red}{$0.1$} ($0.3019$) & \textcolor{red}{$0.01$} ($0.9216$) & \textcolor{red}{$0.01$} ($0.9185$)
|
||||
\end{tabular}
|
||||
\caption[Correlation 1D Regularity/Steps]{Spearman's correlation (and p-values)
|
||||
between regularity and convergence speed for the 1D function approximation
|
||||
\caption[Correlation 1D *regularity* against iterations]{Inverted Spearman's correlation (and p--values)
|
||||
between *regularity* and number of iterations for the 1D function approximation
|
||||
problem.
|
||||
\newline Note: Not significant results are marked in \textcolor{red}{red}.
|
||||
}
|
||||
\label{tab:1dreg}
|
||||
\end{table}
|
||||
|
||||
Regularity should correspond to the convergence speed (measured in
|
||||
iteration--steps of the evolutionary algorithm), and is computed as
|
||||
\emph{Regularity} should correspond to the convergence speed (measured
|
||||
in iteration--steps of the evolutionary algorithm), and is computed as
|
||||
inverse condition number \(\kappa(\vec{U})\) of the deformation--matrix.
|
||||
|
||||
As can be seen from table \ref{tab:1dreg}, we could only show a
|
||||
@ -1222,27 +1229,27 @@ datasets into account we even get a \emph{strong} correlation of
|
||||
|
||||
To explain this discrepancy we took a closer look at what caused this
|
||||
high number of iterations. In figure \ref{fig:1dreg} we also plotted the
|
||||
improvement-potential against the steps next to the regularity--plot.
|
||||
Our theory is that the \emph{very strong} correlation
|
||||
(\(-r_S = -0.82, p=0\)) between improvement--potential and number of
|
||||
iterations hints that the employed algorithm simply takes longer to
|
||||
converge on a better solution (as seen in figure \ref{fig:1dvar} and
|
||||
\ref{fig:1dimp}) offsetting any gain the regularity--measurement could
|
||||
achieve.
|
||||
\emph{improvement potential} against the steps next to the
|
||||
\emph{regularity}--plot. Our theory is that the \emph{very strong}
|
||||
correlation (\(-r_S = -0.82, p=0\)) between \emph{improvement potential}
|
||||
and number of iterations hints that the employed algorithm simply takes
|
||||
longer to converge on a better solution (as seen in figure
|
||||
\ref{fig:1dvar} and \ref{fig:1dimp}) offsetting any gain the
|
||||
regularity--measurement could achieve.
|
||||
|
||||
\subsection{Improvement Potential}\label{improvement-potential-1}
|
||||
|
||||
\begin{figure}[ht]
|
||||
\centering
|
||||
\includegraphics[width=0.8\textwidth]{img/evolution1d/55_to_1010_improvement-vs-evo-error.png}
|
||||
\caption[Correlation 1D Improvement vs. Error]{Improvement potential plotted
|
||||
\caption[Correlation 1D Improvement vs. Error]{*Improvement potential* plotted
|
||||
against the error yielded by the evolutionary optimization for different
|
||||
grid--resolutions}
|
||||
\label{fig:1dimp}
|
||||
\end{figure}
|
||||
|
||||
The improvement potential should correlate to the quality of the
|
||||
fitting--result. We plotted the results for the tested grid-sizes
|
||||
The \emph{improvement potential} should correlate to the quality of the
|
||||
fitting--result. We plotted the results for the tested grid--sizes
|
||||
\(5 \times 5\), \(7 \times 7\) and \(10 \times 10\) in figure
|
||||
\ref{fig:1dimp}. We tested the \(4 \times 7\) and \(7 \times 4\) grids
|
||||
as well, but omitted them from the plot.
|
||||
@ -1280,7 +1287,7 @@ Initially we set up the correspondences \(\vec{c_T(\dots)}\) and
|
||||
other model. We then calculate the analytical solution given these
|
||||
correspondences via \(\vec{P^{*}} = \vec{U^+}\vec{T}\), and also use the
|
||||
first solution as guessed gradient for the calculation of the
|
||||
\emph{improvement--potential}, as the optimal solution is not known. We
|
||||
\emph{improvement potential}, as the optimal solution is not known. We
|
||||
then let the evolutionary algorithm run to within \(1.05\) times the
|
||||
error of this solution and afterwards recalculate the correspondences
|
||||
\(\vec{c_T(\dots)}\) and \(\vec{c_S(\dots)}\).
|
||||
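For fixed correspondences this analytical solution is a single pseudo--inverse; an illustrative sketch, assuming numpy and a dense deformation--matrix:

```python
import numpy as np

def analytic_solution(U, T):
    """P* = U^+ T: least--squares control--points for fixed correspondences,
    with T holding the matched target--positions row by row."""
    return np.linalg.pinv(U) @ T
```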
@ -1310,12 +1317,12 @@ regularization--effect wears off.
|
||||
|
||||
The grid we use for our experiments is just very coarse due to
|
||||
computational limitations. We are not interested in a good
|
||||
reconstruction, but an estimate if the mentioned evolvability criteria
|
||||
reconstruction, but in an estimate of whether the mentioned evolvability--criteria
|
||||
are good.
|
||||
|
||||
In figure \ref{fig:setup3d} we show an example setup of the scene with a
|
||||
\(4\times 4\times 4\)--grid. Identical to the 1--dimensional scenario
|
||||
before, we create a regular grid and move the control-points in the
|
||||
before, we create a regular grid and move the control--points in the
|
||||
exact same random manner between their neighbours as described in
|
||||
section \ref{sec:proc:1d}, but in three instead of two
|
||||
dimensions\footnote{Again, we flip the signs for the edges, if necessary
|
||||
@ -1332,9 +1339,9 @@ Right: A $4 \times 4 \times 7$ grid that we expect to perform worse.}
|
||||
|
||||
As is clearly visible from figure \ref{fig:3dgridres}, the target--model
|
||||
has many vertices in the facial area, at the ears and in the
|
||||
neck--region. Therefore we chose to increase the grid-resolutions for
|
||||
neck--region. Therefore we chose to increase the grid--resolutions for
|
||||
our tests in two different dimensions and see how well the criteria
|
||||
predict a suboptimal placement of these control-points.
|
||||
predict a suboptimal placement of these control--points.
|
||||
|
||||
\section{Results of 3D Function
|
||||
Approximation}\label{results-of-3d-function-approximation}
|
||||
@ -1342,8 +1349,8 @@ Approximation}\label{results-of-3d-function-approximation}
|
||||
In the 3D--Approximation we tried to further evaluate the impact of
|
||||
the grid--layout on the overall criteria. As the target--model has many
|
||||
vertices concentrated in the facial area we start from a
|
||||
\(4 \times 4 \times 4\) grid and only increase the number of control
|
||||
points in one dimension, yielding a resolution of
|
||||
\(4 \times 4 \times 4\) grid and only increase the number of
|
||||
control--points in one dimension, yielding a resolution of
|
||||
\(7 \times 4 \times 4\) and \(4 \times 4 \times 7\) respectively. We
|
||||
visualized those two grids in figure \ref{fig:3dgridres}.
|
||||
|
||||
@ -1374,10 +1381,10 @@ $4 \times 4 \times \mathrm{X}$ & $\mathrm{X} \times 4 \times 4$ & $\mathrm{Y} \t
|
||||
\hline
|
||||
0.89 (0) & 0.9 (0) & 0.91 (0) & 0.94 (0)
|
||||
\end{tabular}
|
||||
\caption[Correlation between variability and fitting error for 3D]{Correlation
|
||||
between variability and fitting error for the 3D fitting scenario.\newline
|
||||
Displayed are the negated Spearman coefficients with the corresponding p-values
|
||||
in brackets for three cases of increasing variability ($\mathrm{X} \in [4,5,7],
|
||||
\caption[Correlation between *variability* and fitting error for 3D]{Correlation
|
||||
between *variability* and fitting error for the 3D fitting scenario.\newline
|
||||
Displayed are the negated Spearman coefficients with the corresponding p--values
|
||||
in brackets for three cases of increasing *variability* ($\mathrm{X} \in [4,5,7],
|
||||
\mathrm{Y} \in [4,5,6]$).
|
||||
\newline Note: Not significant results are marked in \textcolor{red}{red}.}
|
||||
\label{tab:3dvar}
|
||||
@ -1399,40 +1406,42 @@ we anticipated, which shows that the evolutionary algorithm we employed
|
||||
is capable of correcting a purposefully created \glqq bad\grqq ~grid.
|
||||
This also confirms that in our cases the number of control--points is
|
||||
more important for quality than their placement, which is captured by
|
||||
the variability via the rank of the deformation--matrix.
|
||||
the \emph{variability} via the rank of the deformation--matrix.
|
||||
|
||||
Overall the correlation between \emph{variability} and fitness--error
|
||||
were \emph{significant} and showed a \emph{very strong} correlation in
|
||||
all our tests. The detailed correlation--coefficients are given in table
|
||||
\ref{tab:3dvar} alongside their p--values.
|
||||
|
||||
As introduced in section \ref{sec:impl:grid} and visualized in figure
|
||||
\ref{fig:enoughCP}, we know, that not all control--points have to
|
||||
necessarily contribute to the parametrization of our 3D--model. Because
|
||||
we are starting from a sphere, some control--points are too far away
|
||||
from the surface to contribute to the deformation at all.
|
||||
|
||||
One can already see in 2D in figure \ref{fig:enoughCP}, that this effect
|
||||
starts with a regular \(9 \times 9\) grid on a perfect circle. To make
|
||||
sure we observe this, we evaluated the \emph{variability} for 100
|
||||
randomly moved \(10 \times 10 \times 10\) grids on the sphere we start
|
||||
out with.
|
||||
|
||||
\begin{figure}[hbt]
|
||||
\centering
|
||||
\includegraphics[width=0.8\textwidth]{img/evolution3d/variability2_boxplot.png}
|
||||
\caption[Histogram of ranks of high--resolution deformation--matrices]{
|
||||
Histogram of ranks of various $10 \times 10 \times 10$ grids with $1000$
|
||||
control--points each showing in this case how many control points are actually
|
||||
control--points each showing in this case how many control--points are actually
|
||||
used in the calculations.
|
||||
}
|
||||
\label{fig:histrank3d}
|
||||
\end{figure}
|
||||
|
||||
Overall the correlation between variability and fitness--error were
|
||||
\emph{significant} and showed a \emph{very strong} correlation in all
|
||||
our tests. The detailed correlation--coefficients are given in table
|
||||
\ref{tab:3dvar} alongside their p--values.
|
||||
|
||||
As introduces in section \ref{sec:impl:grid} and visualized in figure
|
||||
\ref{fig:enoughCP}, we know, that not all control points have to
|
||||
necessarily contribute to the parametrization of our 3D--model. Because
|
||||
we are starting from a sphere, some control-points are too far away from
|
||||
the surface to contribute to the deformation at all.
|
||||
|
||||
One can already see in 2D in figure \ref{fig:enoughCP}, that this effect
|
||||
starts with a regular \(9 \times 9\) grid on a perfect circle. To make
|
||||
sure we observe this, we evaluated the variability for 100 randomly
|
||||
moved \(10 \times 10 \times 10\) grids on the sphere we start out with.
|
||||
|
||||
As the variability is defined by \(\frac{\mathrm{rank}(\vec{U})}{n}\) we
|
||||
can easily recover the rank of the deformation--matrix \(\vec{U}\). The
|
||||
results are shown in the histogram in figure \ref{fig:histrank3d}.
|
||||
Especially in the centre of the sphere and in the corners of our grid we
|
||||
effectively loose control--points for our parametrization.
|
||||
As the \emph{variability} is defined by
|
||||
\(\frac{\mathrm{rank}(\vec{U})}{n}\) we can easily recover the rank of
|
||||
the deformation--matrix \(\vec{U}\). The results are shown in the
|
||||
histogram in figure \ref{fig:histrank3d}. Especially in the centre of
|
||||
the sphere and in the corners of our grid we effectively lose
|
||||
control--points for our parametrization.
|
||||
|
||||
This of course yields a worse error than when those control--points would
|
||||
be put to use and one should expect a loss in quality evident by a
|
||||
@ -1440,7 +1449,7 @@ higher reconstruction--error opposed to a grid where they are used.
|
||||
Sadly we could not run an in--depth test on this due to computational
|
||||
limitations.
|
||||
|
||||
Nevertheless this hints at the notion, that variability is a good
|
||||
Nevertheless this hints at the notion, that \emph{variability} is a good
|
||||
measure for the overall quality of a fit.
|
||||
|
||||
\subsection{Regularity}\label{regularity-2}
|
||||
@ -1468,21 +1477,21 @@ $4 \times 4 \times 4$ & $4 \times 4 \times 5$ & $4 \times 4 \times 7$ & $4 \time
|
||||
\cline{2-4}
|
||||
\multicolumn{3}{c}{} & all: 0.15 (0) \T
|
||||
\end{tabular}
|
||||
\caption[Correlation between regularity and iterations for 3D]{Correlation
|
||||
between regularity and number of iterations for the 3D fitting scenario.
|
||||
\caption[Correlation between *regularity* and iterations for 3D]{Correlation
|
||||
between *regularity* and number of iterations for the 3D fitting scenario.
|
||||
Displayed are the negated Spearman coefficients with the corresponding p--values
|
||||
in brackets for various given grids ($\mathrm{X} \in [4,5,7], \mathrm{Y} \in [4,5,6]$).
|
||||
\newline Note: Not significant results are marked in \textcolor{red}{red}.}
|
||||
\label{tab:3dreg}
|
||||
\end{table}
|
||||
|
||||
Opposed to the predictions of variability our test on regularity gave a
|
||||
mixed result --- similar to the 1D--case.
|
||||
Opposed to the predictions of \emph{variability} our test on
|
||||
\emph{regularity} gave a mixed result --- similar to the 1D--case.
|
||||
|
||||

In roughly half of the scenarios we have a \emph{significant}, but
\emph{weak} to \emph{moderate} correlation between \emph{regularity} and
the number of iterations. On the other hand, in the scenarios where we
increased the number of control--points, namely \(125\) for the
\(5 \times 5 \times 5\) grid and \(216\) for the \(6 \times 6 \times 6\)
grid, we found a \emph{significant}, but \emph{weak}
\textbf{anti}--correlation when taking all three tests into account.
These results contradict the findings/trends for the sets with \(64\),
\(80\), and \(112\) control--points (first two rows of table \ref{tab:3dreg}).

Taking all results together, we only find a \emph{very weak}, but
\emph{significant} link between \emph{regularity} and the number of
iterations needed for the algorithm to converge.

\begin{figure}[!htb]
\centering
\includegraphics[width=\textwidth]{img/evolution3d/regularity_montage.png}
\caption[Regularity for different 3D--grids]{
Plots of \emph{regularity} against number of iterations for various scenarios together
with a linear fit to indicate trends.}
\label{fig:resreg3d}
\end{figure}

The \emph{regularity}--criterion first behaves as we would like it to,
but then switches to behave exactly opposite to our expectations, as can
be seen in the first three plots. While the number of control--points
increases from red to green to blue and the number of iterations
decreases, the \emph{regularity} seems to increase at first, but then
decreases again on higher grid--resolutions.

This can be an artefact of the definition of \emph{regularity}, as it is
defined by the inverse condition--number of the deformation--matrix
\(\vec{U}\), being the fraction
\(\frac{\sigma_{\mathrm{min}}}{\sigma_{\mathrm{max}}}\) between the
smallest and the largest right singular value of \(\vec{U}\). A small
minimal right singular value occurring on higher grid--resolutions seems
to be the likely problem.

Adding to this we also noted that in the case of the
\(10 \times 10 \times 10\)--grid the \emph{regularity} was always \(0\),
as a non--contributing control--point yields a \(0\)--column in the
deformation--matrix, thus letting \(\sigma_\mathrm{min} = 0\). A better
definition for \emph{regularity} (i.e.~using the smallest non--zero
right singular value) could solve this particular issue, but not fix the
trend we noticed above.
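
As a minimal sketch (assuming, as before, that the deformation--matrix is
given as a NumPy array \texttt{U}; the tolerance \texttt{tol} is a hypothetical
cut--off and not a value from our implementation), both the \emph{regularity}
as defined above and the suggested variant based on the smallest non--zero
right singular value could be computed as follows:

\begin{verbatim}
import numpy as np

def regularity(U):
    # regularity = inverse condition number sigma_min / sigma_max
    s = np.linalg.svd(U, compute_uv=False)
    return s.min() / s.max()

def regularity_nonzero(U, tol=1e-12):
    # variant: take the smallest non-zero singular value, so that
    # 0-columns caused by non-contributing control-points do not
    # force the regularity to 0
    s = np.linalg.svd(U, compute_uv=False)
    s_nonzero = s[s > tol * s.max()]
    return s_nonzero.min() / s.max()
\end{verbatim}
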
\subsection{Improvement Potential}\label{improvement-potential-2}

$4 \times 4 \times 4$ & $4 \times 4 \times 5$ & $4 \times 4 \times 7$ & $4 \time
\cline{2-4}
\multicolumn{3}{c}{} & all: 0.95 (0) \T
\end{tabular}
\caption[Correlation between \emph{improvement potential} and fitting--error for 3D]{Correlation
between \emph{improvement potential} and fitting--error for the 3D fitting scenario.
Displayed are the negated Spearman coefficients with the corresponding p--values
in brackets for various given grids ($\mathrm{X} \in [4,5,7], \mathrm{Y} \in [4,5,6]$).
\newline Note: Non--significant results are marked in \textcolor{red}{red}.}
\label{tab:3dimp}
\end{table}

\begin{figure}[!htb]
\centering
\includegraphics[width=\textwidth]{img/evolution3d/improvement_montage.png}
\caption[Improvement potential for different 3D--grids]{
Plots of \emph{improvement potential} against error given by our \emph{fitness--function}
after convergence together with a linear fit of each of the plotted data to
indicate trends.}
\label{fig:resimp3d}
\end{figure}

We plotted our findings on the \emph{improvement potential} in a similar
way as we did before with the \emph{regularity}. In figure
\ref{fig:resimp3d} one can clearly see the correlation and the spread
within each setup and the behaviour when we increase the number of
control--points.

Along with this we also give the Spearman--coefficients and their
p--values in table \ref{tab:3dimp}. Within one scenario we only find a
\emph{weak} to \emph{moderate} correlation between the \emph{improvement
potential} and the fitting error, but all findings (except for
\(7 \times 4 \times 4\) and \(6 \times 6 \times 6\)) are significant.
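
As a side note, the (negated) Spearman coefficients and p--values reported in
the tables above can be computed, for example, with SciPy. The following
sketch uses made--up data purely for illustration; the values do not stem from
our experiments:

\begin{verbatim}
import numpy as np
from scipy.stats import spearmanr

# hypothetical example: improvement potential and final fitting error
potential = np.array([0.91, 0.85, 0.78, 0.72, 0.66])
fit_error = np.array([0.012, 0.019, 0.025, 0.031, 0.044])

r_s, p_value = spearmanr(potential, fit_error)
print(-r_s, p_value)  # we report the negated coefficient
\end{verbatim}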

If we take multiple datasets into account, the correlation is \emph{very
strong} and \emph{significant}, which is good, as this functions as a
litmus--test, because the quality is naturally tied to the number of
control--points.

All in all the \emph{improvement potential} seems to be a good and
sensible measure of quality, even given gradients of varying quality.

Lastly, a small note on the behaviour of \emph{improvement potential}
and convergence speed, as we used this in the 1D case to argue why the
\emph{regularity} defied our expectations. As a contrast we wanted to
show that \emph{improvement potential} cannot serve for good
predictions of the convergence speed. In figure \ref{fig:imp1d3d} we
show \emph{improvement potential} against number of iterations for both
scenarios. As one can see, in the 1D scenario we have a \emph{strong}
and \emph{significant} correlation (with \(-r_S = -0.72\), \(p = 0\)),
whereas in the 3D scenario we have the opposite \emph{significant} and
\emph{strong} effect (with \(-r_S = 0.69\), \(p=0\)), so these
correlations clearly seem to be dependent on the scenario and are not
suited for generalization.

\begin{figure}[hbt]
\centering
\includegraphics[width=\textwidth]{img/imp1d3d.png}
\caption[Improvement potential and convergence speed\newline for 1D and 3D--scenarios]{
\newline
Left: \emph{Improvement potential} against convergence speed for the
1D--scenario\newline
Right: \emph{Improvement potential} against convergence speed for the 3D--scenario
}
\label{fig:imp1d3d}
\end{figure}

\label{sec:dis}

In this thesis we took a look at the different criteria for
\emph{evolvability} as introduced by Richter et al.~\cite{anrichterEvol},
namely \emph{variability}, \emph{regularity} and \emph{improvement
potential}, under different setup--conditions. Where Richter et al. used
\acf{RBF}, we employed \acf{FFD} to set up a low--complexity
parametrization of a more complex vertex--mesh.

In our findings we could show in the 1D--scenario that there were
statistically \emph{significant} \emph{very strong} correlations between
\emph{variability and fitting error} (\(0.94\)) and \emph{improvement
potential and fitting error} (\(1.0\)), comparable to the results of
Richter et al. (with \(0.31\) to \(0.88\) for the former and \(0.75\) to
\(0.99\) for the latter), whereas we found only \emph{weak} correlations
for \emph{regularity and convergence--speed} (\(0.28\)) as opposed to
Richter et al. with \(0.39\) to \(0.91\).\footnote{We only took
statistically \emph{significant} results into consideration when
compiling these numbers. Details are given in the respective chapters.}

For the 3D--scenario our results show a \emph{very strong},
\emph{significant} correlation between \emph{variability and fitting
error} with \(0.89\) to \(0.94\), which is pretty much in line with the
findings of Richter et al. (\(0.65\) to \(0.95\)). The correlation
between \emph{improvement potential and fitting error} behaves
similarly, with our findings having a significant coefficient of
\(0.3\) to \(0.95\) depending on the grid--resolution, compared to the
\(0.61\) to \(0.93\) from Richter et al. In the case of the correlation
of \emph{regularity and convergence speed} we found very different (and
often not significant) correlations and anti--correlations ranging from
\(-0.25\) to \(0.46\), whereas Richter et al. reported correlations
between \(0.34\) and \(0.87\).

Taking these results into consideration, one can say that
\emph{variability} and \emph{improvement potential} are very good
estimates for the expected quality of a fit.

Richter et al. used a direct manipulation in \cite{anrichterEvol},
whereas we merely used an indirect one. Given this difference between
direct and indirect manipulations, the usage of \acf{DM--FFD} could also
work better with the criteria we examined. This can also solve the
problem of bad singular values for the \emph{regularity}, as the
incorporation of the parametrization of the points on the surface ---
which are the essential part of a direct--manipulation --- could cancel
out a bad control--grid, as the bad control--points are never or
negligibly used to parametrize those surface--points.

\improvement[inline]{Adjust the links in the bibliography: the DOI
overrides the author's direct links.}

% \backmatter
\cleardoublepage

% \addtocounter{chapter}{1}
\newpage
% \listoftables
% \listoftodos
% \addcontentsline{toc}{chapter}{\protect\numberline{\thechapter}TODOs}
% \addtocounter{chapter}{1}
\newpage
% \newpage
% \printindex

%%%%%%%%%%%%%%% Erklaerung %%%%%%%%%%%%%%%

set xtics norangelimit
set xtics ()
set ytics border in scale 1,0.5 nomirror norotate autojustify
set title "Fitting Errors of 1D Function Approximation for various grids\n"
set ylabel "Fitting-Error according to fitness-function"

header ="`head -1 errors.csv | sed -s "s/\"//g" | sed -s "s/,/ /g"`"
set for [i=1:words(header)] xtics (word(header,i) i)