% bibtotoc[numbered] : Literaturv. wird in Inhaltsv. aufgenommen
|
|
|
|
|
% abstracton : Abstract mit Ueberschrift
|
|
|
|
|
\documentclass[
|
|
|
|
|
a4paper, % default
|
2017-10-28 18:54:05 +00:00
|
|
|
|
11pt, % default = 11pt
|
|
|
|
|
DIV=calc,
|
|
|
|
|
BCOR6mm, % Bindungskorrektur bei Klebebindung 6mm, bei Lochen BCOR8.25mm
|
twoside, % default, 2seitig
|
|
|
|
|
titlepage,
|
|
|
|
|
% pagesize=auto
|
|
|
|
|
% openany, % Kapitel koennen auch auf geraden Seiten starten
|
|
|
|
|
% draft % faster compilation, image dummies
|
% appendixprefix, % Anhang mit Bezeichner
|
|
|
|
|
bibtotocnumbered,
|
|
|
|
|
liststotocnumbered,
|
|
|
|
|
listof=totocnumbered,
|
|
|
|
|
index=totocnumbered,
|
xcolor=dvipsnames,
|
]{scrbook}
|
|
|
|
|
|
|
|
|
|
%%%%%%%%%%%%%%% Literaturverzeichnisstil %%%%%%%%%%%%%%%
|
|
|
|
|
% achtung, auch \bibstyle, unten, anpassen!
|
|
|
|
|
% \usepackage[square]{natbib} % fuer bibstyle natdin/ see ../natbib.pdf
|
|
|
|
|
|
|
|
|
|
%%%%%%%%%%%%%%% Packages %%%%%%%%%%%%%%%
|
|
|
|
|
\input{settings/packages}
|
|
|
|
|
\makeindex
|
|
|
|
|
|
|
|
|
|
%%%%%%%%%%%%%%% Graphics %%%%%%%%%%%%%%%
|
|
|
|
|
\graphicspath{{pics/}}
|
|
|
|
|
|
|
|
|
|
%%%%%%%%%%%%%%% Globale Einstellungen %%%%%%%%%%%%%%%
|
|
|
|
|
\input{settings/commands}
|
|
|
|
|
\input{settings/environments}
|
2017-10-24 11:22:20 +00:00
|
|
|
|
\setlength{\parindent}{0pt} % kein einzug bei absaetzen
|
|
|
|
|
\setlength{\parskip}{12pt plus6pt minus2pt} % dafür abstand zwischen absäzen
|
2017-08-23 19:18:44 +00:00
|
|
|
|
% \renewcommand{\familydefault}{\sfdefault}
|
2017-10-24 11:22:20 +00:00
|
|
|
|
\setstretch{1.5} % 1.5-facher zeilenabstand
|
2017-10-25 18:40:51 +00:00
|
|
|
|
\renewcommand{\arraystretch}{1.5} % größere Abstände in Tabellen etc.
|
2017-08-23 19:18:44 +00:00
|
|
|
|
|
|
|
|
|
%%%%%%%%%%%%%%% Header - Footer %%%%%%%%%%%%%%%
|
|
|
|
|
% ### For the two-sided layout (option twoside):
|
|
|
|
|
\usepackage{fancyhdr}%http://www.tug.org/tex-archive/info/german/fancyhdr
|
|
|
|
|
\pagestyle{fancy} % must be called before the following renewcommands !!!
|
|
|
|
|
\fancyhead{} % Alte Definition loeschen
|
|
|
|
|
\fancyfoot{} % dito
|
|
|
|
|
\renewcommand{\chaptermark}[1]{\markboth{\chaptername\ \thechapter{}: #1}{}}
|
|
|
|
|
\renewcommand{\sectionmark}[1]{\markright{\thesection{}~~#1}}
|
|
|
|
|
% % um das hard codierte makeuppercase zu verhindern
|
|
|
|
|
\fancyhead[EL]{\textrm{\nouppercase\leftmark}}% Even=linke Seiten und dort links, also aussn das \leftmark
|
|
|
|
|
\fancyhead[OR]{\textrm{\nouppercase\rightmark}}% Odd=rechte Seiten und dort rechts, also aussen das \rightmark
|
|
|
|
|
\fancyfoot[RO,LE]{\thepage} % Seitenzahl : rechts ungerade, links gerade
|
|
|
|
|
|
% ###### Title ######
|
|
|
|
|
|
|
|
|
|
\usepackage[explicit]{titlesec}
|
|
|
|
|
\newcommand{\hsp}{\hspace{20pt}}
|
|
|
|
|
% \titleformat{\chapter}[hang]{\Huge\bfseries\ }{\textcolor{CadetBlue}{\thechapter} #1}{20pt}{\Huge\bfseries\ }
|
|
|
|
|
\titleformat{name=\chapter,numberless}[hang]{\Huge\bfseries\ }{#1}{20pt}{\Huge\bfseries\ }
|
|
|
|
|
\titleformat{\chapter}[hang]{\Huge\bfseries\ }{\color{CadetBlue}\thechapter}{20pt}{\begin{tabular}[t]{@{\color{CadetBlue}\vrule width 2pt}>{\hangindent=20pt\hsp}p{\dimexpr 1\textwidth -44pt}}#1\end{tabular}}
|
|
|
|
|
|
2017-10-08 21:29:45 +00:00
|
|
|
|
\titleformat{name=\section,numberless}[hang]{\Large\bfseries\ }{#1}{32pt}{\Large\bfseries\ }
|
|
|
|
|
\titleformat{\section}[hang]{\Large\bfseries\ }{\color{CadetBlue}\thesection}{32pt}{\begin{tabular}[t]{p{\dimexpr 1\textwidth -44pt}}#1\end{tabular}}
|
|
|
|
|
\titleformat{name=\subsection,numberless}[hang]{\large\bfseries\ }{#1}{27pt}{\large\bfseries\ }
|
|
|
|
|
\titleformat{\subsection}[hang]{\large\bfseries\ }{\color{CadetBlue}\thesubsection}{27pt}{\begin{tabular}[t]{p{\dimexpr 1\textwidth -44pt}}#1\end{tabular}}
|
2017-10-08 19:08:29 +00:00
|
|
|
|
|
2017-08-23 19:18:44 +00:00
|
|
|
|
% ### For the one-sided layout:
|
|
|
|
|
%\usepackage{fancyhdr} %
|
|
|
|
|
%\lhead{\textsf{\nouppercase\leftmark}}
|
|
|
|
|
%\chead{}
|
|
|
|
|
%\rhead{\textsf{\nouppercase\rightmark}}
|
|
|
|
|
%\lfoot{}
|
|
|
|
|
%\cfoot{\textsf{\thepage}}
|
|
|
|
|
%\rfoot{}
|
|
|
|
|
|
|
|
|
|
\setkomafont{sectioning}{\rmfamily\bfseries}
|
|
|
|
|
\setcounter{tocdepth}{3}
|
|
|
|
|
%\setcounter{secnumdepth}{3}
|
|
|
|
|
% \input{settings/hyphenation} %% Manchmal bricht latex nicht richtig um. hier trennregeln rein.
|
|
|
|
|
% \includeonly{%
|
|
|
|
|
% % files/0_titlepage.tex
|
|
|
|
|
% % files/1_0_introduction,%
|
|
|
|
|
% % files/2_0_knownDCJ,%
|
|
|
|
|
% % files/3_0_DCJIndels,%
|
|
|
|
|
% % files/4_0_DCJIndels_1comps,%
|
|
|
|
|
% files/5_0_DCJIndels_2comps,%
|
|
|
|
|
% % files/6_0_implementation,%
|
|
|
|
|
% % files/7_0_evaluation%
|
|
|
|
|
% % ,files/8_0_conclusion%
|
|
|
|
|
% }
|
%%%%%%%%%%%%%%% PANDOC-needed defs %%%%%%%%%%
|
|
|
|
|
\providecommand{\tightlist}{%
|
|
|
|
|
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
|
|
|
|
|
|
|
|
|
|
%disable "Redefining ngerman shorthand"-Message
|
|
|
|
|
% \makeatletter
|
|
|
|
|
% \patchcmd{\pdfstringdef}
|
|
|
|
|
% {\csname HyPsd@babel@}
|
|
|
|
|
% {\let\bbl@info\@gobble\csname HyPsd@babel@}
|
|
|
|
|
% {}{}
|
|
|
|
|
% \makeatother
|
|
|
|
|
|
2017-08-23 19:18:44 +00:00
|
|
|
|
%%%%%%%%%%%%%%% Hauptdokument %%%%%%%%%%%%%%%
|
|
|
|
|
\begin{document}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
% ###### Autoref definitions (hyperref package)#####
|
|
|
|
|
\def\subtableautorefname{Table}
|
|
|
|
|
\def\algorithmautorefname{Algorithm}
|
|
|
|
|
\def\chapterautorefname{Chapter}
|
|
|
|
|
\def\sectionautorefname{Section}
|
|
|
|
|
\def\definitionautorefname{Definition}
|
|
|
|
|
\def\exampleautorefname{Example}
|
|
|
|
|
\def\observationautorefname{Observation}
|
|
|
|
|
\def\propositionautorefname{Proposition}
|
|
|
|
|
\def\lemmaautorefname{Lemma}
|
|
|
|
|
% in diesem Dokument nicht verwendet:
|
|
|
|
|
% \def\subsectionautorefname{Subsection}
|
|
|
|
|
% \def\Subsubsectionautorefname{Subsubsection}
|
|
|
|
|
% \def\subfigureautorefname{Figure}
|
|
|
|
|
% \def\claimautorefname{Claim}
|
|
|
|
|
|
|
|
|
|
%%%%%%%%%%%%%%% Deckblatt %%%%%%%%%%%%%%%
|
|
|
|
|
\extratitle{}
|
|
|
|
|
\input{files/titlepage}
|
|
|
|
|
%\input{files/titlepage.pdf} % Rueckseite leer
|
|
|
|
|
% \input{files/0_deckblatt/title}
|
|
|
|
|
\pagestyle{empty} % Rueckseite leer
|
|
|
|
|
%
|
|
|
|
|
%%%%%%%%%%%%%%% Verzeichnisse %%%%%%%%%%%%%%%
|
|
|
|
|
\frontmatter % Abstrakte Gliederungsebene: Anfang des Buches
|
2017-10-08 21:29:45 +00:00
|
|
|
|
\renewcommand{\autodot}{}
|
|
|
|
|
\tableofcontents % Rueckseite leer
|
2017-08-23 19:18:44 +00:00
|
|
|
|
%\lstlistoflistings % fuer listingsverzeichnis mit package listings
|
|
|
|
|
|
|
|
|
|
%%%%%%%%%%%%%%% Hauptteil %%%%%%%%%%%%%%%
|
|
|
|
|
% Insgesamt ca. 60-100 Seiten Davon mindesten 50% Eigene Arbeit
|
|
|
|
|
\mainmatter %Abstrakte Gliederungsebene: Hauptteil des Buches
|
|
|
|
|
\pagestyle{fancy}
|
2017-08-28 16:39:18 +00:00
|
|
|
|
\pagenumbering{arabic}
|
2017-08-30 18:56:35 +00:00
|
|
|
|
\chapter*{How to read this Thesis}
|
2017-08-28 16:39:18 +00:00
|
|
|
|
|
|
|
|
|
We prepend this chapter as a guide through the nomenclature used in the
formulas throughout this thesis.

Unless otherwise noted, the following holds:
|
|
|
|
|
|
|
|
|
|
\begin{itemize}
|
|
|
|
|
\tightlist
|
|
|
|
|
\item
|
|
|
|
|
lowercase letters \(x,y,z\)\\
|
2017-10-14 11:45:22 +00:00
|
|
|
|
refer to real variables and represent the coordinates of a point in
|
|
|
|
|
3D--Space.
|
2017-08-28 16:39:18 +00:00
|
|
|
|
\item
|
|
|
|
|
lowercase letters \(u,v,w\)\\
|
2017-10-14 11:45:22 +00:00
|
|
|
|
refer to real variables between \(0\) and \(1\) used as coefficients
|
2017-10-08 21:29:45 +00:00
|
|
|
|
in a 3D B--Spline grid.
|
2017-08-28 16:39:18 +00:00
|
|
|
|
\item
|
|
|
|
|
other lowercase letters\\
|
2017-10-14 11:45:22 +00:00
|
|
|
|
refer to other scalar (real) variables.
|
2017-08-28 16:39:18 +00:00
|
|
|
|
\item
|
|
|
|
|
lowercase \textbf{bold} letters (e.g. \(\vec{x},\vec{y}\))\\
|
2017-10-14 11:45:22 +00:00
|
|
|
|
refer to 3D coordinates.
|
2017-08-28 16:39:18 +00:00
|
|
|
|
\item
|
2017-09-09 16:38:43 +00:00
|
|
|
|
uppercase \textbf{BOLD} letters (e.g. \(\vec{D}, \vec{M}\))\\
|
2017-10-14 11:45:22 +00:00
|
|
|
|
refer to matrices.
|
2017-08-28 16:39:18 +00:00
|
|
|
|
\end{itemize}
|
|
|
|
|
|
|
|
|
|
\chapter{Introduction}\label{introduction}
|
|
|
|
|
|
2017-10-07 23:07:37 +00:00
|
|
|
|
Many modern industrial design processes require advanced optimization
methods due to the increased complexity resulting from more and more
degrees of freedom as methods refine and/or other methods are used.
Examples of this are physical domains like aerodynamics (e.g.~drag) and
fluid dynamics (e.g.~throughput of liquid) --- where the complexity
increases with the temporal and spatial resolution of the simulation ---
or known hard algorithmic problems in computer science (e.g.~the layout
of circuit boards or the stacking of 3D--objects). Moreover, these are
typically not static environments; requirements shift over time or from
case to case.
|
2017-10-07 23:07:37 +00:00
|
|
|
|
|
2017-10-28 18:54:05 +00:00
|
|
|
|
\begin{figure}[hbt]
|
|
|
|
|
\centering
|
|
|
|
|
\includegraphics[width=\textwidth]{img/Evo_overview.png}
|
|
|
|
|
\caption{Example of the use of evolutionary algorithms in automotive design
|
|
|
|
|
(from \cite{anrichterEvol}).}
|
|
|
|
|
\end{figure}
|
|
|
|
|
|
2017-10-10 15:45:58 +00:00
|
|
|
|
Evolutionary algorithms cope especially well with these problem domains
while addressing all the issues at hand\cite{minai2006complex}. One of
the main concerns in these algorithms is the formulation of the problem
in terms of a \emph{genome} and a \emph{fitness--function}. While one can
typically use an arbitrary cost--function as the \emph{fitness--function}
(e.g.~the amount of drag, the amount of space, etc.), the translation of
the problem--domain into a simple parametric representation (the
\emph{genome}) can be challenging.

This translation is often necessary as the target of the optimization
may have too many degrees of freedom for a reasonable computation. In
the example of an aerodynamic simulation of drag on an object, such
object--designs tend to have a high number of vertices to adhere to
various requirements (visual, practical, physical, etc.). A simpler
representation of the same object in only a few parameters that
manipulate the whole in a sensible manner is desirable, as this often
decreases the computation time significantly.

Additionally one can exploit the fact that drag in this case is
especially sensitive to non--smooth surfaces, so that a smooth local
manipulation of the surface as a whole is more advantageous than a
merely random manipulation of the vertices.

The quality of such a low--dimensional representation in biological
evolution is strongly tied to the notion of
\emph{evolvability}\cite{wagner1996complex}, as the parametrization of
the problem has serious implications for the convergence speed and the
quality of the solution\cite{Rothlauf2006}. However, there is no
consensus on how \emph{evolvability} is defined and the meaning varies
from context to context\cite{richter2015evolvability}. As a consequence
there is a need for measurable criteria, so that we are able to compare
different representations in order to learn from and improve upon them.
|
2017-10-11 17:53:01 +00:00
|
|
|
|
|
2017-10-28 18:54:05 +00:00
|
|
|
|
\begin{figure}[hbt]
|
|
|
|
|
\centering
|
|
|
|
|
\includegraphics[width=\textwidth]{img/deformations.png}
|
|
|
|
|
\caption{Example of RBF--based deformation and FFD targeting the same mesh.}
|
|
|
|
|
\end{figure}
|
|
|
|
|
|
2017-10-11 17:53:01 +00:00
|
|
|
|
One example of such a general representation of an object is to generate
random points and represent the vertices of an object as distances to
these points --- for example via \acf{RBF}. If one (or the algorithm)
moves such a point, the object gets deformed only locally (due to the
\ac{RBF}). As this results in a simple mapping from the parameter--space
onto the object, one can try out different representations of the same
object and evaluate which criteria may be suited to describe this notion
of \emph{evolvability}. This is exactly what Richter et
al.\cite{anrichterEvol} have done.

As we transfer the results of Richter et al.\cite{anrichterEvol} from
using \acf{RBF} as a representation to manipulate geometric objects to
the use of \acf{FFD}, we will use the same definition of
\emph{evolvability} the original authors used, namely \emph{regularity},
\emph{variability}, and \emph{improvement potential}. We introduce these
terms in detail in Chapter \ref{sec:intro:rvi}. In the original
publication the authors showed a correlation between these
evolvability--criteria and the quality and convergence speed of such an
optimization.

We will replicate the same setup on the same objects but use \acf{FFD}
instead of \acf{RBF} to create a local deformation near the
control--points and evaluate whether the evolvability--criteria still
work as a predictor for the \emph{evolvability} of the representation
given the different deformation scheme, as suspected in
\cite{anrichterEvol}.

First, we introduce the different topics in isolation in Chapter
\ref{sec:back}. We take an abstract look at the definition of \ac{FFD}
for a one--dimensional line (in \ref{sec:back:ffd}) and discuss why this
is a sensible deformation function (in \ref{sec:back:ffdgood}). Then we
establish some background--knowledge of evolutionary algorithms (in
\ref{sec:back:evo}) and why they are useful in our domain (in
\ref{sec:back:evogood}), followed by the definition of the different
evolvability--criteria established in \cite{anrichterEvol} (in
\ref{sec:intro:rvi}).

In Chapter \ref{sec:impl} we take a look at our implementation of
\ac{FFD} and the adaptation for 3D--meshes that we used. Next, in
Chapter \ref{sec:eval}, we describe the different scenarios we use to
evaluate the different evolvability--criteria, incorporating all aspects
introduced in Chapter \ref{sec:back}. Following that, we evaluate the
results in Chapter \ref{sec:res}, followed by a discussion, summary, and
outlook in Chapter \ref{sec:dis}.
|
2017-10-07 23:07:37 +00:00
|
|
|
|
|
|
|
|
|
\chapter{Background}\label{background}
|
2017-08-28 16:39:18 +00:00
|
|
|
|
|
2017-10-08 19:08:29 +00:00
|
|
|
|
\label{sec:back}
|
|
|
|
|
|
2017-08-28 16:39:18 +00:00
|
|
|
|
\section{\texorpdfstring{What is \acf{FFD}?}{What is ?}}\label{what-is}
|
|
|
|
|
|
2017-10-08 19:08:29 +00:00
|
|
|
|
\label{sec:back:ffd}
|
2017-10-02 19:56:06 +00:00
|
|
|
|
|
2017-08-28 16:39:18 +00:00
|
|
|
|
First of all we have to establish how an \ac{FFD} works and why it is a
good tool for deforming geometric objects (especially meshes in our
case) in the first place. For simplicity we only summarize the 1D--case
from \cite{spitzmuller1996bezier} here and go into the extension to the
3D--case in chapter~\ref{3dffd}.
|
2017-10-11 17:53:01 +00:00
|
|
|
|
|
|
|
|
|
The main idea of \ac{FFD} is to create a function
\(s : [0,1[^d \mapsto \mathbb{R}^d\) that spans a certain part of a
vector--space and is only linearly parametrized by some special
control--points \(p_i\) and a constant attribution--function
\(a_i(u)\), so \[
s(\vec{u}) = \sum_i a_i(\vec{u}) \vec{p_i}
\] can be thought of as a representation of the inside of the convex hull
generated by the control--points, where each position inside can be
accessed by the right \(u \in [0,1[^d\).
|
2017-09-06 15:07:46 +00:00
|
|
|
|
|
2017-10-11 17:53:01 +00:00
|
|
|
|
\begin{figure}[!ht]
|
|
|
|
|
\begin{center}
|
|
|
|
|
\includegraphics[width=0.7\textwidth]{img/B-Splines.png}
|
|
|
|
|
\end{center}
|
2017-10-29 20:44:10 +00:00
|
|
|
|
\caption[Example of B--Splines]{Example of a parametrization of a line with
corresponding deformation to generate a deformed object.}
|
|
|
|
|
\label{fig:bspline}
|
|
|
|
|
\end{figure}
|
|
|
|
|
|
2017-10-27 12:31:55 +00:00
|
|
|
|
In the 1--dimensional example in figure~\ref{fig:bspline}, the
|
2017-10-29 18:02:31 +00:00
|
|
|
|
control--points are indicated as red dots and the colour--gradient
|
|
|
|
|
should hint at the \(u\)--values ranging from \(0\) to \(1\).
|
2017-10-11 17:53:01 +00:00
|
|
|
|
|
|
|
|
|
We now define an \acf{FFD} by the following:\\
Given an arbitrary number of points \(p_i\) along a line, we map a
scalar value \(\tau_i \in [0,1[\) to each point with
\(\tau_i < \tau_{i+1} \forall i\) according to the position of \(p_i\)
on said line. Additionally, given a degree of the target polynomial
\(d\) we define the curve \(N_{i,d,\tau_i}(u)\) as follows:

\begin{equation} \label{eqn:ffd1d1}
N_{i,0,\tau}(u) = \begin{cases} 1, & u \in [\tau_i, \tau_{i+1}[ \\ 0, & \mbox{otherwise} \end{cases}
\end{equation}

and

\begin{equation} \label{eqn:ffd1d2}
N_{i,d,\tau}(u) = \frac{u-\tau_i}{\tau_{i+d}-\tau_i} N_{i,d-1,\tau}(u) + \frac{\tau_{i+d+1} - u}{\tau_{i+d+1}-\tau_{i+1}} N_{i+1,d-1,\tau}(u)
\end{equation}

If we now multiply every \(p_i\) by the corresponding
\(N_{i,d,\tau_i}(u)\) we get the contribution of each point \(p_i\) to
the final curve--point parametrized only by \(u \in [0,1[\). As can be
seen from \eqref{eqn:ffd1d2} we only access the points \([p_i..p_{i+d}]\)
for any given \(i\)\footnote{one more for each recursive step.}, which
gives us, in combination with choosing \(p_i\) and \(\tau_i\) in order,
only a local influence of \(d+1\) points.
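To make the recursion above concrete, the following Python sketch (purely
illustrative and not part of the implementation described in
Chapter~\ref{sec:impl}; the guards against zero denominators are an
assumption for repeated knots) evaluates \(N_{i,d,\tau}(u)\) via equations
\eqref{eqn:ffd1d1} and \eqref{eqn:ffd1d2}:

\begin{verbatim}
def basis(i, d, u, tau):
    """Cox--de Boor recursion: N_{i,d,tau}(u)."""
    if d == 0:
        return 1.0 if tau[i] <= u < tau[i + 1] else 0.0
    left = right = 0.0
    if tau[i + d] != tau[i]:
        left = (u - tau[i]) / (tau[i + d] - tau[i]) \
               * basis(i, d - 1, u, tau)
    if tau[i + d + 1] != tau[i + 1]:
        right = (tau[i + d + 1] - u) / (tau[i + d + 1] - tau[i + 1]) \
                * basis(i + 1, d - 1, u, tau)
    return left + right

def spline_point(u, points, d, tau):
    """s(u) = sum_i N_{i,d,tau}(u) * p_i for scalar control points."""
    return sum(basis(i, d, u, tau) * p for i, p in enumerate(points))
\end{verbatim}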
|
2017-09-06 15:07:46 +00:00
|
|
|
|
|
|
|
|
|
We can even take the derivative of this equation straightforwardly for an
arbitrary \(N\)\footnote{\emph{Warning:} in the case of \(d=1\) the
recursion--formula yields a \(0\) denominator, but \(N\) is also
\(0\). The right solution for this case is a derivative of \(0\).}:

\[\frac{\partial}{\partial u} N_{i,d,\tau}(u) = \frac{d}{\tau_{i+d} - \tau_i} N_{i,d-1,\tau}(u) - \frac{d}{\tau_{i+d+1} - \tau_{i+1}} N_{i+1,d-1,\tau}(u)\]

For a B--Spline \[s(u) = \sum_{i} N_{i,d,\tau_i}(u) p_i\] these
derivatives yield
\(\left(\frac{\partial}{\partial u}\right)^d s(u) = 0\).
|
2017-09-06 15:07:46 +00:00
|
|
|
|
|
|
|
|
|
Another interesting property of these recursive polynomials is that they
are continuous (given \(d \ge 1\)), as every \(p_i\) gets blended in
between \(\tau_i\) and \(\tau_{i+d}\) and out between \(\tau_{i+1}\)
and \(\tau_{i+d+1}\), as can be seen from the two coefficients in every
step of the recursion.

This means that all changes are only a local linear combination of the
control--points \(p_i\) to \(p_{i+d+1}\), and consequently this yields
the convex--hull--property of B--Splines --- meaning that no matter
how we choose our coefficients, the resulting points all have to lie
inside the convex--hull of the control--points.

For a given point \(s_i\) we can then calculate the contributions
\(u_{i,j}~:=~N_{j,d,\tau}\) of each control--point \(p_j\) to get the
projection from the control--point--space into the object--space: \[
s_i = \sum_j u_{i,j} \cdot p_j = \vec{n}_i^{T} \vec{p}
\] or, written for all points at the same time: \[
\vec{s} = \vec{U} \vec{p}
\] where \(\vec{U}\) is the \(n \times m\) transformation--matrix (later
on called \textbf{deformation matrix}) for \(n\) object--space--points
and \(m\) control--points.
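As an illustration of how the deformation matrix arises in practice, the
following Python sketch (an assumption about how one could implement it,
not the actual code of this thesis) assembles \(\vec{U}\) for one spatial
dimension, reusing the \texttt{basis} function from the sketch above:

\begin{verbatim}
import numpy as np

def deformation_matrix(us, m, d, tau):
    """Row i holds N_{j,d,tau}(u_i) for all m control points."""
    U = np.zeros((len(us), m))
    for row, u in enumerate(us):
        for j in range(m):
            U[row, j] = basis(j, d, u, tau)
    return U

# deformed object-space points: s = U @ p  (p holds the control points)
\end{verbatim}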
|
2017-09-06 15:07:46 +00:00
|
|
|
|
|
2017-10-16 20:45:29 +00:00
|
|
|
|
\begin{figure}[ht]
|
|
|
|
|
\begin{center}
|
|
|
|
|
\includegraphics[width=\textwidth]{img/unity.png}
|
|
|
|
|
\end{center}
|
|
|
|
|
\caption[B--spline--basis--function as partition of unity]{From \cite[Figure 2.13]{brunet2010contributions}:\newline
|
|
|
|
|
\glqq Some interesting properties of the B--splines. On the natural definition domain
|
2017-10-29 18:02:31 +00:00
|
|
|
|
of the B--spline ($[k_0,k_4]$ on this figure), the B--Spline basis functions sum
|
|
|
|
|
up to one (partition of unity). In this example, we use B--Splines of degree 2.
|
2017-10-16 20:45:29 +00:00
|
|
|
|
The horizontal segment below the abscissa axis represents the domain of
|
|
|
|
|
influence of the B--splines basis function, i.e. the interval on which they are
|
2017-10-29 20:44:10 +00:00
|
|
|
|
not null. At a given point, there are at most $ d+1$ non--zero B--Spline basis
|
2017-10-16 20:45:29 +00:00
|
|
|
|
functions (compact support).\grqq \newline
|
|
|
|
|
Note, that Brunet starts his index at $-d$ opposed to our definition, where we
|
|
|
|
|
start at $0$.}
|
|
|
|
|
\label{fig:partition_unity}
|
|
|
|
|
\end{figure}
|
|
|
|
|
|
2017-10-29 18:02:31 +00:00
|
|
|
|
Furthermore, B--Spline--basis--functions form a partition of unity for
all but the first and last \(d\)
control--points\cite{brunet2010contributions}. Therefore we later use
the border--points \(d+1\) times, such that \(\sum_j u_{i,j} p_j = p_i\)
for these points.

The locality of the influence of each control--point and the partition
of unity were beautifully pictured by Brunet, which we include here as
figure \ref{fig:partition_unity}.
|
|
|
|
|
|
2017-09-06 15:07:46 +00:00
|
|
|
|
\subsection{\texorpdfstring{Why is \ac{FFD} a good deformation
|
|
|
|
|
function?}{Why is a good deformation function?}}\label{why-is-a-good-deformation-function}
|
|
|
|
|
|
2017-10-08 19:08:29 +00:00
|
|
|
|
\label{sec:back:ffdgood}
|
|
|
|
|
|
2017-09-06 15:07:46 +00:00
|
|
|
|
The usage of \ac{FFD} as a tool for manipulating geometric objects
follows directly from the properties of the polynomials and the
correspondence to the control--points. Having only a few control--points
gives the user a nicer high--level--interface, as she only needs to move
these points and the model follows in an intuitive manner. The
deformation is smooth, as the underlying polynomial is smooth as well,
and it affects as many vertices of the model as needed. Moreover the
changes are always local, so there is no risk of a change that the user
cannot immediately see.
|
2017-09-06 15:07:46 +00:00
|
|
|
|
|
|
|
|
|
But there are also disadvantages to this approach. The user loses the
ability to directly influence vertices, and even seemingly simple tasks
such as creating a plateau can be difficult to
achieve\cite[chapter~3.2]{hsu1991dmffd}\cite{hsu1992direct}.

These disadvantages led to the formulation of
\acf{DM--FFD}\cite[chapter~3.3]{hsu1991dmffd}, in which the user directly
interacts with the surface--mesh. All interactions are applied
proportionally to the control--points that make up the parametrization
of the interaction--point itself, yielding a smooth deformation of the
surface \emph{at} the surface without seemingly arbitrarily scattered
control--points. Moreover this increases the efficiency of an
evolutionary optimization\cite{Menzel2006}, which we will use later on.
|
2017-09-06 15:07:46 +00:00
|
|
|
|
|
2017-10-02 20:13:24 +00:00
|
|
|
|
\begin{figure}[!ht]
|
|
|
|
|
\includegraphics[width=\textwidth]{img/hsu_fig7.png}
|
2017-10-02 20:36:46 +00:00
|
|
|
|
\caption{Figure 7 from \cite{hsu1991dmffd}.}
|
2017-10-02 20:13:24 +00:00
|
|
|
|
\label{fig:hsu_fig7}
|
|
|
|
|
\end{figure}
|
|
|
|
|
|
|
|
|
|
But this approach also has downsides as can be seen in figure
|
|
|
|
|
\ref{fig:hsu_fig7}, as the tessellation of the invisible grid has a
|
|
|
|
|
major impact on the deformation itself.
|
2017-09-06 15:07:46 +00:00
|
|
|
|
|
2017-10-08 21:29:45 +00:00
|
|
|
|
All in all, \ac{FFD} and \ac{DM--FFD} are still good ways to deform a
high--polygon mesh despite the downsides.
|
2017-09-06 15:07:46 +00:00
|
|
|
|
|
2017-10-10 15:45:58 +00:00
|
|
|
|
\section{What is evolutionary
|
|
|
|
|
optimization?}\label{what-is-evolutionary-optimization}
|
2017-08-23 19:18:44 +00:00
|
|
|
|
|
2017-10-08 19:08:29 +00:00
|
|
|
|
\label{sec:back:evo}
|
|
|
|
|
|
2017-10-10 15:45:58 +00:00
|
|
|
|
In this thesis we are using an evolutionary optimization strategy to
|
2017-10-10 11:53:17 +00:00
|
|
|
|
solve the problem of finding the best parameters for our deformation.
|
|
|
|
|
This approach, however, is very generic and we introduce it here in a
|
|
|
|
|
broader sense.
|
2017-10-10 10:24:15 +00:00
|
|
|
|
|
|
|
|
|
\begin{algorithm}
|
2017-10-10 15:45:58 +00:00
|
|
|
|
\caption{An outline of evolutionary algorithms}
|
2017-10-10 10:24:15 +00:00
|
|
|
|
\label{alg:evo}
|
|
|
|
|
\begin{algorithmic}
|
|
|
|
|
\STATE t := 0;
|
|
|
|
|
\STATE initialize $P(0) := \{\vec{a}_1(0),\dots,\vec{a}_\mu(0)\} \in I^\mu$;
|
|
|
|
|
\STATE evaluate $F(0) : \{\Phi(x) | x \in P(0)\}$;
|
|
|
|
|
\WHILE{$c(F(t)) \neq$ \TRUE}
|
|
|
|
|
\STATE recombine: $P'(t) := r(P(t))$;
\STATE mutate: $P''(t) := m(P'(t))$;
|
|
|
|
|
\STATE evaluate $F''(t) : \{\Phi(x) | x \in P''(t)\}$
|
|
|
|
|
\STATE select: $P(t + 1) := s(P''(t) \cup Q,\Phi)$;
|
|
|
|
|
\STATE t := t + 1;
|
|
|
|
|
\ENDWHILE
|
|
|
|
|
\end{algorithmic}
|
|
|
|
|
\end{algorithm}
|
|
|
|
|
|
2017-10-10 15:45:58 +00:00
|
|
|
|
The general shape of an evolutionary algorithm (adapted from
\cite{back1993overview}) is outlined in Algorithm \ref{alg:evo}. Here,
\(P(t)\) denotes the population of parameters in step \(t\) of the
algorithm. The population contains \(\mu\) individuals \(a_i\) from the
possible individual--set \(I\) that fit the shape of the parameters we
are looking for. Typically these are initialized by a random guess or
just zero. Furthermore we need a so--called \emph{fitness--function}
\(\Phi : I \mapsto M\) that maps each individual to a measurable
space \(M\) (usually \(M = \mathbb{R}\)), along with a
convergence--function \(c : I \mapsto \mathbb{B}\) that terminates the
optimization.

Biologically speaking, the set \(I\) corresponds to the set of possible
\emph{genotypes} while \(M\) represents the possible observable
\emph{phenotypes}. \emph{Genotypes} define all initial properties of an
individual, but their properties are not directly observable. It is the
genes that evolve over time (and thus correspond to the parameters we
are tweaking in our algorithms or the genes in nature), but only the
\emph{phenotypes} make certain behaviour observable (algorithmically
through our \emph{fitness--function}, biologically by the ability to
survive and produce offspring). Any individual in our algorithm thus
experiences a biologically motivated life cycle of inheriting genes from
its parents, modified by occurring mutations, performing according to a
fitness--metric, and generating offspring based on this. Therefore each
iteration in the while--loop above is also often called a generation.
|
|
|
|
|
|
|
|
|
|
One should note that there is a subtle difference between
|
|
|
|
|
\emph{fitness--function} and a so called
|
|
|
|
|
\emph{genotype--phenotype--mapping}. The first one directly applies the
|
|
|
|
|
\emph{genotype--phenotype--mapping} and evaluates the performance of an
|
|
|
|
|
individual, thus going directly from genes/parameters to
|
|
|
|
|
reproduction--probability/score. In a concrete example the
|
|
|
|
|
\emph{genotype} can be an arbitrary vector (the genes), the
|
|
|
|
|
\emph{phenotype} is then a deformed object, and the performance can be a
|
|
|
|
|
single measurement like an air--drag--coefficient. The
|
|
|
|
|
\emph{genotype--phenotype--mapping} would then just be the generation of
|
|
|
|
|
different objects from that starting--vector, whereas the
|
|
|
|
|
\emph{fitness--function} would go directly from such a starting--vector
|
|
|
|
|
to the coefficient that we want to optimize.
|
2017-10-10 10:24:15 +00:00
|
|
|
|
|
|
|
|
|
The main algorithm just repeats the following steps:
|
|
|
|
|
|
|
|
|
|
\begin{itemize}
|
|
|
|
|
\tightlist
|
|
|
|
|
\item
|
2017-10-10 11:53:17 +00:00
|
|
|
|
\textbf{Recombine} with a recombination--function
|
2017-10-14 11:45:22 +00:00
|
|
|
|
\(r : I^{\mu} \mapsto I^{\lambda}\) to generate \(\lambda\) new
|
|
|
|
|
individuals based on the characteristics of the \(\mu\) parents.\\
|
|
|
|
|
This makes sure that the next guess is close to the old guess.
|
2017-10-10 10:24:15 +00:00
|
|
|
|
\item
|
2017-10-10 11:53:17 +00:00
|
|
|
|
\textbf{Mutate} with a mutation--function
|
2017-10-10 10:24:15 +00:00
|
|
|
|
\(m : I^{\lambda} \mapsto I^{\lambda}\) to introduce new effects that
|
|
|
|
|
cannot be produced by mere recombination of the parents.\\
|
2017-10-14 11:45:22 +00:00
|
|
|
|
Typically this just adds minor defects to individual members of the
|
2017-10-10 10:24:15 +00:00
|
|
|
|
population like adding a random gaussian noise or amplifying/dampening
|
|
|
|
|
random parts.
|
|
|
|
|
\item
|
2017-10-10 11:53:17 +00:00
|
|
|
|
\textbf{Selection} takes a selection--function
|
2017-10-10 10:24:15 +00:00
|
|
|
|
\(s : (I^\lambda \cup I^{\mu + \lambda},\Phi) \mapsto I^\mu\) that
|
|
|
|
|
selects from the previously generated \(I^\lambda\) children and
|
|
|
|
|
optionally also the parents (denoted by the set \(Q\) in the
|
2017-10-29 20:44:10 +00:00
|
|
|
|
algorithm) using the \emph{fitness--function} \(\Phi\). The result of
|
|
|
|
|
this operation is the next Population of \(\mu\) individuals.
|
2017-10-10 10:24:15 +00:00
|
|
|
|
\end{itemize}
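To make these steps concrete, the following Python sketch shows a simple
\((\mu + \lambda)\)--style loop in the spirit of Algorithm \ref{alg:evo}.
It is only an illustration under simplifying assumptions (intermediate
recombination of two parents, Gaussian mutation with a fixed \(\sigma\),
a fixed generation budget instead of a convergence--function) and not the
optimizer used in this thesis:

\begin{verbatim}
import numpy as np

def evolve(fitness, init, mu=10, lam=40, sigma=0.1, generations=100):
    rng = np.random.default_rng()
    population = [init + rng.normal(0, sigma, init.shape)
                  for _ in range(mu)]
    for t in range(generations):              # stand-in for c(F(t))
        children = []
        for _ in range(lam):
            a, b = rng.choice(mu, size=2, replace=False)
            child = 0.5 * (population[a] + population[b])   # recombine r
            child += rng.normal(0, sigma, child.shape)      # mutate m
            children.append(child)
        pool = population + children          # Q = P(t), "plus"-selection
        pool.sort(key=fitness)                # evaluate Phi (minimization)
        population = pool[:mu]                # select s
    return population[0]
\end{verbatim}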
|
|
|
|
|
|
|
|
|
|
All these functions can (and mostly do) have a lot of hidden parameters
|
2017-10-28 18:54:05 +00:00
|
|
|
|
that can be changed over time. A good overview of this is given in
|
|
|
|
|
\cite{eiben1999parameter}, so we only give a small excerpt here.
|
2017-10-27 12:31:55 +00:00
|
|
|
|
|
2017-10-28 18:54:05 +00:00
|
|
|
|
For example the mutation can consist of merely a single \(\sigma\)
|
|
|
|
|
determining the strength of the gaussian defects in every parameter ---
|
2017-10-29 18:02:31 +00:00
|
|
|
|
or giving a different \(\sigma\) to every component of those parameters.
|
|
|
|
|
An even more sophisticated example would be the \glqq 1/5 success
|
|
|
|
|
rule\grqq ~from \cite{rechenberg1973evolutionsstrategie}.
|
2017-10-28 18:54:05 +00:00
|
|
|
|
|
2017-10-29 18:02:31 +00:00
|
|
|
|
Also in the selection--function it may not be wise to only take the
|
|
|
|
|
best--performing individuals, because it may be that the optimization
|
|
|
|
|
has to overcome a barrier of bad fitness to achieve a better local
|
|
|
|
|
optimum.
|
2017-10-28 18:54:05 +00:00
|
|
|
|
|
|
|
|
|
Recombination also does not have to be mere random choosing of parents,
|
2017-10-29 18:02:31 +00:00
|
|
|
|
but can also take ancestry, distance of genes or groups of individuals
|
|
|
|
|
into account.
|
2017-10-02 19:56:06 +00:00
|
|
|
|
|
2017-10-10 15:45:58 +00:00
|
|
|
|
\section{Advantages of evolutionary
|
|
|
|
|
algorithms}\label{advantages-of-evolutionary-algorithms}
|
2017-08-23 19:18:44 +00:00
|
|
|
|
|
2017-10-08 19:08:29 +00:00
|
|
|
|
\label{sec:back:evogood}
|
|
|
|
|
|
2017-10-10 15:45:58 +00:00
|
|
|
|
The main advantage of evolutionary algorithms is the ability to find
optima of general functions just with the help of a given
\emph{fitness--function}. Components and techniques for evolutionary
algorithms are specifically known to help with different problems
arising in the domain of optimization\cite{weise2012evolutionary}. An
overview of the typical problems is shown in figure \ref{fig:probhard}.
|
2017-10-10 11:53:17 +00:00
|
|
|
|
|
|
|
|
|
\begin{figure}[!ht]
|
|
|
|
|
\includegraphics[width=\textwidth]{img/weise_fig3.png}
|
|
|
|
|
\caption{Fig.~3. taken from \cite{weise2012evolutionary}}
|
|
|
|
|
\label{fig:probhard}
|
|
|
|
|
\end{figure}
|
|
|
|
|
|
|
|
|
|
Most of the advantages stem from the fact that a gradient--based
procedure usually has only one point of observation from where it
evaluates the next steps, whereas an evolutionary strategy starts with a
population of guessed solutions. Because an evolutionary strategy can be
modified according to the problem--domain (e.g.~by the ideas given
above) it can also approximate very difficult problems in an efficient
manner and even self--tune parameters depending on the ancestry at
runtime\footnote{Some examples of this are explained in detail in
\cite{eiben1999parameter}}.

If an analytic best solution exists and is easily computable
(e.g.~because the error--function is convex), an evolutionary algorithm
is not the right choice. Although both converge to the same solution,
the analytic one is usually faster.

But in reality many problems have no analytic solution, because the
problem is either not convex or there are so many parameters that an
analytic solution (mostly meaning the equivalence to an exhaustive
search) is not computationally feasible. Here evolutionary optimization
has another advantage, as one can at least get suboptimal solutions
fast, which then refine over time and still converge to a decent
solution much faster than an exhaustive search.
|
2017-09-27 20:06:39 +00:00
|
|
|
|
|
2017-10-01 18:06:41 +00:00
|
|
|
|
\section{Criteria for the evolvability of linear
|
|
|
|
|
deformations}\label{criteria-for-the-evolvability-of-linear-deformations}
|
2017-08-23 19:18:44 +00:00
|
|
|
|
|
2017-10-07 23:07:37 +00:00
|
|
|
|
\label{sec:intro:rvi}
|
|
|
|
|
|
2017-10-14 11:45:22 +00:00
|
|
|
|
As we have established in chapter \ref{sec:back:ffd}, we can describe a
deformation by the formula \[
\vec{S} = \vec{U}\vec{P}
\] where \(\vec{S}\) is an \(n \times d\) matrix of vertices\footnote{We
use \(\vec{S}\) in this notation, as we will use this parametrization
of a source--mesh to manipulate \(\vec{S}\) into a target--mesh
\(\vec{T}\) via \(\vec{P}\)}, \(\vec{U}\) are the (during
parametrization) calculated deformation--coefficients and \(\vec{P}\) is
an \(m \times d\) matrix of control--points that we interact with during
deformation.

We can also think of the deformation in terms of differences from the
original coordinates \[
\Delta \vec{S} = \vec{U} \cdot \Delta \vec{P}
\] which is isomorphic to the former due to the linearity of the
deformation. Seen this way, the behaviour of the deformation lies solely
in the entries of \(\vec{U}\), which is why the three criteria focus on
this matrix.
|
2017-10-11 17:53:01 +00:00
|
|
|
|
|
2017-10-01 18:06:41 +00:00
|
|
|
|
\subsection{Variability}\label{variability}
|
|
|
|
|
|
2017-10-08 19:08:29 +00:00
|
|
|
|
In \cite{anrichterEvol} \emph{variability} is defined as
|
2017-10-27 12:31:55 +00:00
|
|
|
|
\[\mathrm{variability}(\vec{U}) := \frac{\mathrm{rank}(\vec{U})}{n},\]
|
|
|
|
|
whereby \(\vec{U}\) is the \(n \times m\) deformation--Matrix used to
|
2017-10-29 20:44:10 +00:00
|
|
|
|
map the \(m\) control--points onto the \(n\) vertices.
|
2017-10-01 18:06:41 +00:00
|
|
|
|
|
2017-10-08 21:29:45 +00:00
|
|
|
|
Given \(n = m\), an identical number of control--points and vertices,
|
2017-10-29 20:44:10 +00:00
|
|
|
|
this quotient will be \(=1\) if all control--points are independent of
|
2017-10-08 21:29:45 +00:00
|
|
|
|
each other and the solution is to trivially move every control--point
|
|
|
|
|
onto a target--point.
|
2017-10-01 18:06:41 +00:00
|
|
|
|
|
2017-10-29 20:44:10 +00:00
|
|
|
|
In practice the value of \(V(\vec{U})\) is typically \(\ll 1\), because
there are only few control--points for many vertices, so \(m \ll n\).
|
2017-10-01 18:06:41 +00:00
|
|
|
|
|
2017-10-27 12:31:55 +00:00
|
|
|
|
This criterion should correlate with the degrees of freedom the given
parametrization has. This can be seen from the fact that
\(\mathrm{rank}(\vec{U})\) is limited by \(\min(m,n)\) and --- as \(n\)
is constant --- can never exceed \(n\).

The rank itself is also interesting, as control--points could
theoretically be placed on top of each other or be linearly dependent in
another way --- both cases lower the rank below the number of
control--points \(m\) and are thus measurable by the
\emph{variability}.
|
|
|
|
|
|
2017-10-01 18:06:41 +00:00
|
|
|
|
\subsection{Regularity}\label{regularity}
|
|
|
|
|
|
2017-10-08 19:08:29 +00:00
|
|
|
|
\emph{Regularity} is defined\cite{anrichterEvol} as
\[\mathrm{regularity}(\vec{U}) := \frac{1}{\kappa(\vec{U})} = \frac{\sigma_{min}}{\sigma_{max}}\]
where \(\sigma_{min}\) and \(\sigma_{max}\) are the smallest and
largest singular values of the deformation--matrix \(\vec{U}\).
|
2017-10-01 18:06:41 +00:00
|
|
|
|
|
|
|
|
|
As we deform the given Object only based on the parameters as
|
|
|
|
|
\(\vec{p} \mapsto f(\vec{x} + \vec{U}\vec{p})\) this makes sure that
|
|
|
|
|
\(\|\vec{Up}\| \propto \|\vec{p}\|\) when \(\kappa(\vec{U}) \approx 1\).
|
|
|
|
|
The inversion of \(\kappa(\vec{U})\) is only performed to map the
|
2017-10-29 20:44:10 +00:00
|
|
|
|
criterion--range to \([0..1]\), where \(1\) is the optimal value and
|
2017-10-01 18:06:41 +00:00
|
|
|
|
\(0\) is the worst value.
|
|
|
|
|
|
2017-10-16 20:45:29 +00:00
|
|
|
|
On the one hand this criterion should be characteristic for numeric
|
|
|
|
|
stability\cite[chapter 2.7]{golub2012matrix} and on the other hand for
|
|
|
|
|
the convergence speed of evolutionary algorithms\cite{anrichterEvol} as
|
|
|
|
|
it is tied to the notion of
|
2017-10-01 18:06:41 +00:00
|
|
|
|
locality\cite{weise2012evolutionary,thorhauer2014locality}.
|
|
|
|
|
|
|
|
|
|
\subsection{Improvement Potential}\label{improvement-potential}
|
|
|
|
|
|
2017-10-08 19:08:29 +00:00
|
|
|
|
In contrast to the general nature of \emph{variability} and
|
2017-10-29 20:44:10 +00:00
|
|
|
|
\emph{regularity}, which are agnostic of the \emph{fitness--function} at
|
|
|
|
|
hand, the third criterion should reflect a notion of the potential for
|
2017-10-27 12:31:55 +00:00
|
|
|
|
optimization, taking a guess into account.
|
2017-10-01 18:06:41 +00:00
|
|
|
|
|
2017-10-27 12:31:55 +00:00
|
|
|
|
Most of the time some kind of gradient \(g\) is available to suggest a
|
|
|
|
|
direction worth pursuing; either from a previous iteration or by
|
|
|
|
|
educated guessing. We use this to guess how much change can be achieved
|
|
|
|
|
in the given direction.
|
2017-10-01 18:06:41 +00:00
|
|
|
|
|
2017-10-08 19:08:29 +00:00
|
|
|
|
The definition for an \emph{improvement potential} \(P\)
|
2017-10-01 18:06:41 +00:00
|
|
|
|
is\cite{anrichterEvol}: \[
|
2017-10-27 12:31:55 +00:00
|
|
|
|
\mathrm{potential}(\vec{U}) := 1 - \|(\vec{1} - \vec{UU}^+)\vec{G}\|^2_F
|
2017-10-28 18:54:05 +00:00
|
|
|
|
\] given some approximate \(n \times d\) fitness--gradient \(\vec{G}\),
|
|
|
|
|
normalized to \(\|\vec{G}\|_F = 1\), whereby \(\|\cdot\|_F\) denotes the
|
2017-10-08 21:29:45 +00:00
|
|
|
|
Frobenius--Norm.
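All three criteria can be computed directly from \(\vec{U}\) (and, for the
\emph{improvement potential}, a gradient guess \(\vec{G}\)). The following
Python sketch uses standard linear algebra routines; it is an illustrative
assumption about the computation, not the code used for the results in
this thesis:

\begin{verbatim}
import numpy as np

def variability(U):
    return np.linalg.matrix_rank(U) / U.shape[0]      # rank(U) / n

def regularity(U):
    sigma = np.linalg.svd(U, compute_uv=False)
    return sigma.min() / sigma.max()                  # 1 / kappa(U)

def improvement_potential(U, G):
    G = G / np.linalg.norm(G)                         # ensure ||G||_F = 1
    residual = G - U @ np.linalg.pinv(U) @ G          # (1 - U U^+) G
    return 1.0 - np.linalg.norm(residual) ** 2        # 1 - ||.||_F^2
\end{verbatim}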
|
2017-08-23 19:18:44 +00:00
|
|
|
|
|
2017-10-01 19:04:04 +00:00
|
|
|
|
\chapter{\texorpdfstring{Implementation of
|
|
|
|
|
\acf{FFD}}{Implementation of }}\label{implementation-of}
|
2017-08-23 19:18:44 +00:00
|
|
|
|
|
2017-10-08 19:08:29 +00:00
|
|
|
|
\label{sec:impl}
|
|
|
|
|
|
2017-10-08 21:29:45 +00:00
|
|
|
|
The general formulation of B--Splines has two free parameters \(d\) and
|
2017-10-02 20:36:46 +00:00
|
|
|
|
\(\tau\) which must be chosen beforehand.
|
2017-10-02 19:56:06 +00:00
|
|
|
|
|
|
|
|
|
As we usually work with regular grids in our \ac{FFD} we define \(\tau\)
|
2017-10-02 20:36:46 +00:00
|
|
|
|
statically as \(\tau_i = \nicefrac{i}{n}\) whereby \(n\) is the number
|
2017-10-08 21:29:45 +00:00
|
|
|
|
of control--points in that direction.
|
2017-10-02 19:56:06 +00:00
|
|
|
|
|
2017-10-08 21:29:45 +00:00
|
|
|
|
\(d\) defines the \emph{degree} of the B--Spline--Function (the number
|
|
|
|
|
of times this function is differentiable) and for our purposes we fix
|
|
|
|
|
\(d\) to \(3\), but give the formulas for the general case so it can be
|
2017-10-02 19:56:06 +00:00
|
|
|
|
adapted quite freely.
|
|
|
|
|
|
|
|
|
|
\section{\texorpdfstring{Adaption of
|
|
|
|
|
\ac{FFD}}{Adaption of }}\label{adaption-of}
|
|
|
|
|
|
2017-10-15 18:00:59 +00:00
|
|
|
|
\label{sec:ffd:adapt}
|
|
|
|
|
|
2017-10-08 19:08:29 +00:00
|
|
|
|
As we have established in Chapter \ref{sec:back:ffd} we can define an
|
2017-10-08 21:29:45 +00:00
|
|
|
|
\ac{FFD}--displacement as
|
2017-10-02 19:56:06 +00:00
|
|
|
|
|
|
|
|
|
\begin{equation}
|
|
|
|
|
\Delta_x(u) = \sum_i N_{i,d,\tau_i}(u) \Delta_x c_i
|
|
|
|
|
\end{equation}
|
|
|
|
|
|
2017-10-29 20:44:10 +00:00
|
|
|
|
Note that we only sum up the \(\Delta\)--displacements in the
|
|
|
|
|
control--points \(c_i\) to get the change in position of the point we
|
|
|
|
|
are interested in.
|
2017-10-02 19:56:06 +00:00
|
|
|
|
|
|
|
|
|
In this way every deformed vertex is defined by \[
|
|
|
|
|
\textrm{Deform}(v_x) = v_x + \Delta_x(u)
|
|
|
|
|
\] with \(u \in [0..1[\) being the variable that connects the
|
2017-10-08 21:29:45 +00:00
|
|
|
|
high--detailed vertex--mesh to the low--detailed control--grid. To
|
|
|
|
|
actually calculate the new position of the vertex we first have to
|
|
|
|
|
calculate the \(u\)--value for each vertex. This is achieved by finding
|
|
|
|
|
out the parametrization of \(v\) in terms of \(c_i\) \[
|
2017-10-02 20:36:46 +00:00
|
|
|
|
v_x \overset{!}{=} \sum_i N_{i,d,\tau_i}(u) c_i
|
|
|
|
|
\] so we can minimize the error between those two: \[
|
|
|
|
|
\underset{u}{\argmin}\,Err(u,v_x) = \underset{u}{\argmin}\,2 \cdot \|v_x - \sum_i N_{i,d,\tau_i}(u) c_i\|^2_2
|
2017-10-08 21:29:45 +00:00
|
|
|
|
\] As this error--term is quadratic we just take the derivative with respect to \(u\), yielding \[
|
|
|
|
|
\begin{array}{rl}
|
|
|
|
|
\frac{\partial}{\partial u} & v_x - \sum_i N_{i,d,\tau_i}(u) c_i \\
|
|
|
|
|
= & - \sum_i \left( \frac{d}{\tau_{i+d} - \tau_i} N_{i,d-1,\tau}(u) - \frac{d}{\tau_{i+d+1} - \tau_{i+1}} N_{i+1,d-1,\tau}(u) \right) c_i
|
|
|
|
|
\end{array}
|
|
|
|
|
\] and do a gradient--descent to approximate the value of \(u\) up to an
|
2017-10-02 19:56:06 +00:00
|
|
|
|
\(\epsilon\) of \(0.0001\).
|
|
|
|
|
|
2017-10-28 18:54:05 +00:00
|
|
|
|
For this we employ the Gauss--Newton algorithm\cite{gaussNewton}, which
converges to the least--squares solution. An exact solution of this
problem is impossible most of the time, because we usually have far more
vertices than control--points (\(\#v~\gg~\#c\)).
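A minimal sketch of this parametrization step for a single coordinate is
given below. It builds on the \texttt{basis} function from the sketch in
chapter~\ref{sec:back:ffd} and uses a simple Newton--type update on the
scalar residual; the starting value, the clamping to \([0,1[\) and the
stopping threshold are assumptions for illustration only:

\begin{verbatim}
def dbasis(i, d, u, tau):
    """Derivative of N_{i,d,tau}(u) (see the derivative formula above)."""
    left = right = 0.0
    if tau[i + d] != tau[i]:
        left = d / (tau[i + d] - tau[i]) * basis(i, d - 1, u, tau)
    if tau[i + d + 1] != tau[i + 1]:
        right = d / (tau[i + d + 1] - tau[i + 1]) * basis(i + 1, d - 1, u, tau)
    return left - right

def find_u(v_x, c, d, tau, u=0.5, eps=1e-4, max_iter=100):
    """Find u such that v_x ~= sum_i N_{i,d,tau}(u) * c_i."""
    for _ in range(max_iter):
        err = v_x - sum(basis(i, d, u, tau) * ci for i, ci in enumerate(c))
        derr = -sum(dbasis(i, d, u, tau) * ci for i, ci in enumerate(c))
        if abs(err) < eps or derr == 0.0:
            break
        u = min(max(u - err / derr, 0.0), 1.0 - 1e-9)  # Newton step, clamped
    return u
\end{verbatim}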
|
2017-10-02 19:56:06 +00:00
|
|
|
|
|
|
|
|
|
\section{\texorpdfstring{Adaption of \ac{FFD} for a
|
2017-10-08 21:29:45 +00:00
|
|
|
|
3D--Mesh}{Adaption of for a 3D--Mesh}}\label{adaption-of-for-a-3dmesh}
|
2017-08-23 19:18:44 +00:00
|
|
|
|
|
2017-09-06 15:07:46 +00:00
|
|
|
|
\label{3dffd}
|
|
|
|
|
|
2017-10-08 21:29:45 +00:00
|
|
|
|
This is a straightforward extension of the 1D--method presented in the
previous section. But this time things get a bit more complicated. As we
have a 3--dimensional grid we may have a different number of
control--points in each direction.
|
2017-10-02 19:56:06 +00:00
|
|
|
|
|
2017-10-29 20:44:10 +00:00
|
|
|
|
Given \(n,m,o\) control--points in \(x,y,z\)--direction, each point on
|
|
|
|
|
the curve is defined by
|
2017-10-02 20:36:46 +00:00
|
|
|
|
\[V(u,v,w) = \sum_i \sum_j \sum_k N_{i,d,\tau_i}(u) N_{j,d,\tau_j}(v) N_{k,d,\tau_k}(w) \cdot C_{ijk}.\]
|
2017-10-02 19:56:06 +00:00
|
|
|
|
|
2017-10-08 21:29:45 +00:00
|
|
|
|
In this case we have three different B--Splines (one for each dimension)
|
2017-10-02 19:56:06 +00:00
|
|
|
|
and also 3 variables \(u,v,w\) for each vertex we want to approximate.
|
|
|
|
|
|
|
|
|
|
Given a target vertex \(\vec{p}^*\) and an initial guess
|
2017-10-08 21:29:45 +00:00
|
|
|
|
\(\vec{p}=V(u,v,w)\) we define the error--function for the
|
|
|
|
|
gradient--descent as:
|
2017-10-02 19:56:06 +00:00
|
|
|
|
|
|
|
|
|
\[Err(u,v,w,\vec{p}^{*}) = \vec{p}^{*} - V(u,v,w)\]
|
|
|
|
|
|
|
|
|
|
And the partial version for just one direction as
|
|
|
|
|
|
2017-10-02 20:36:46 +00:00
|
|
|
|
\[Err_x(u,v,w,\vec{p}^{*}) = p^{*}_x - \sum_i \sum_j \sum_k N_{i,d,\tau_i}(u) N_{j,d,\tau_j}(v) N_{k,d,\tau_k}(w) \cdot {c_{ijk}}_x \]
|
2017-10-02 19:56:06 +00:00
|
|
|
|
|
|
|
|
|
To solve this we derive partially, like before:
|
|
|
|
|
|
|
|
|
|
\[
|
|
|
|
|
\begin{array}{rl}
|
2017-10-02 20:36:46 +00:00
|
|
|
|
\displaystyle \frac{\partial Err_x}{\partial u} & p^{*}_x - \displaystyle \sum_i \sum_j \sum_k N_{i,d,\tau_i}(u) N_{j,d,\tau_j}(v) N_{k,d,\tau_k}(w) \cdot {c_{ijk}}_x \\
|
2017-10-07 23:07:37 +00:00
|
|
|
|
= & \displaystyle - \sum_i \sum_j \sum_k N'_{i,d,\tau_i}(u) N_{j,d,\tau_j}(v) N_{k,d,\tau_k}(w) \cdot {c_{ijk}}_x
|
2017-10-02 19:56:06 +00:00
|
|
|
|
\end{array}
|
|
|
|
|
\]
|
|
|
|
|
|
|
|
|
|
The other partial derivatives follow the same pattern yielding the
|
|
|
|
|
Jacobian:
|
|
|
|
|
|
|
|
|
|
\[
|
|
|
|
|
J(Err(u,v,w)) =
|
|
|
|
|
\left(
|
|
|
|
|
\begin{array}{ccc}
|
|
|
|
|
\frac{\partial Err_x}{\partial u} & \frac{\partial Err_x}{\partial v} & \frac{\partial Err_x}{\partial w} \\
|
|
|
|
|
\frac{\partial Err_y}{\partial u} & \frac{\partial Err_y}{\partial v} & \frac{\partial Err_y}{\partial w} \\
|
|
|
|
|
\frac{\partial Err_z}{\partial u} & \frac{\partial Err_z}{\partial v} & \frac{\partial Err_z}{\partial w}
|
|
|
|
|
\end{array}
|
|
|
|
|
\right)
|
2017-10-07 23:07:37 +00:00
|
|
|
|
\] \[
|
|
|
|
|
\scriptsize
|
|
|
|
|
=
|
|
|
|
|
\left(
|
|
|
|
|
\begin{array}{ccc}
|
|
|
|
|
- \displaystyle \sum_{i,j,k} N'_{i}(u) N_{j}(v) N_{k}(w) \cdot {c_{ijk}}_x &- \displaystyle \sum_{i,j,k} N_{i}(u) N'_{j}(v) N_{k}(w) \cdot {c_{ijk}}_x & - \displaystyle \sum_{i,j,k} N_{i}(u) N_{j}(v) N'_{k}(w) \cdot {c_{ijk}}_x \\
|
|
|
|
|
- \displaystyle \sum_{i,j,k} N'_{i}(u) N_{j}(v) N_{k}(w) \cdot {c_{ijk}}_y &- \displaystyle \sum_{i,j,k} N_{i}(u) N'_{j}(v) N_{k}(w) \cdot {c_{ijk}}_y & - \displaystyle \sum_{i,j,k} N_{i}(u) N_{j}(v) N'_{k}(w) \cdot {c_{ijk}}_y \\
|
|
|
|
|
- \displaystyle \sum_{i,j,k} N'_{i}(u) N_{j}(v) N_{k}(w) \cdot {c_{ijk}}_z &- \displaystyle \sum_{i,j,k} N_{i}(u) N'_{j}(v) N_{k}(w) \cdot {c_{ijk}}_z & - \displaystyle \sum_{i,j,k} N_{i}(u) N_{j}(v) N'_{k}(w) \cdot {c_{ijk}}_z
|
|
|
|
|
\end{array}
|
|
|
|
|
\right)
|
2017-10-02 19:56:06 +00:00
|
|
|
|
\]
|
|
|
|
|
|
2017-10-08 21:29:45 +00:00
|
|
|
|
With the Gauss--Newton algorithm we iterate via the formula
|
2017-10-02 19:56:06 +00:00
|
|
|
|
\[J(Err(u,v,w)) \cdot \Delta \left( \begin{array}{c} u \\ v \\ w \end{array} \right) = -Err(u,v,w)\]
|
2017-10-29 18:02:31 +00:00
|
|
|
|
and use Cramer's rule for inverting the small Jacobian and solving this
|
2017-10-02 19:56:06 +00:00
|
|
|
|
system of linear equations.
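One such Gauss--Newton step could look like the following Python sketch,
where \texttt{value\_fn} and \texttt{jacobian\_fn} are placeholders for the
trivariate spline \(V(u,v,w)\) and its Jacobian as given above (the
concrete implementation in this thesis may differ):

\begin{verbatim}
import numpy as np

def gauss_newton_step(u, v, w, target, value_fn, jacobian_fn):
    """Solve J(Err) * delta = -Err via Cramer's rule, update (u,v,w)."""
    err = target - value_fn(u, v, w)     # Err(u,v,w), a 3-vector
    J = jacobian_fn(u, v, w)             # 3x3 matrix of partial derivatives
    det = np.linalg.det(J)
    delta = np.empty(3)
    for k in range(3):                   # replace column k by the RHS -Err
        Jk = J.copy()
        Jk[:, k] = -err
        delta[k] = np.linalg.det(Jk) / det
    return u + delta[0], v + delta[1], w + delta[2]
\end{verbatim}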
|
|
|
|
|
|
2017-10-27 12:31:55 +00:00
|
|
|
|
As there is no strict upper bound on the number of iterations for this
algorithm, we just iterate it long enough to be within the given
\(\epsilon\)--error above. This takes --- depending on the shape of the
object and the grid --- about \(3\) to \(5\) iterations, as we observed
in practice.

Another issue that we observed in our implementation is that multiple
local optima may exist on self--intersecting grids. We solve this
problem by defining self--intersecting grids to be \emph{invalid} and
not testing any of them.

This is not as big a problem as it sounds at first, as
self--intersections mean that control--points that are further away from
a given vertex have more influence over the deformation than
control--points closer to this vertex. This also contradicts the notion
of locality that we want to achieve and that we deemed beneficial for a
good behaviour of the evolutionary algorithm.
|
|
|
|
|
|
2017-10-10 15:45:58 +00:00
|
|
|
|
\section{Deformation Grid}\label{deformation-grid}
|
|
|
|
|
|
2017-10-25 18:40:51 +00:00
|
|
|
|
\label{sec:impl:grid}
|
|
|
|
|
|
2017-10-10 15:45:58 +00:00
|
|
|
|
As mentioned in chapter \ref{sec:back:evo}, the way of choosing the
|
|
|
|
|
representation to map the general problem (mesh--fitting/optimization in
|
2017-10-29 20:44:10 +00:00
|
|
|
|
our case) into a parameter--space is very important for the quality and
|
2017-10-10 15:45:58 +00:00
|
|
|
|
runtime of evolutionary algorithms\cite{Rothlauf2006}.
|
|
|
|
|
|
|
|
|
|
Because our control--points are arranged in a grid, we can accurately
|
|
|
|
|
represent each vertex--point inside the grid's volume with proper
|
|
|
|
|
B--Spline--coefficients between \([0,1[\) and --- as a consequence ---
|
2017-10-29 20:44:10 +00:00
|
|
|
|
we have to embed our object into it (or create constant
|
|
|
|
|
``dummy''--points outside).
|
2017-10-10 15:45:58 +00:00
|
|
|
|
|
2017-10-27 12:31:55 +00:00
|
|
|
|
The great advantage of B--Splines is the local, direct impact of each
|
2017-10-10 15:45:58 +00:00
|
|
|
|
control point without having a \(1:1\)--correlation, and a smooth
|
|
|
|
|
deformation. While the advantages are great, the issues arise from the
|
2017-10-27 12:31:55 +00:00
|
|
|
|
problem of deciding where to place the control--points and how many to
|
|
|
|
|
place at all.
|
2017-10-10 15:45:58 +00:00
|
|
|
|
|
2017-10-25 18:40:51 +00:00
|
|
|
|
\begin{figure}[!tbh]
|
|
|
|
|
\centering
|
|
|
|
|
\includegraphics{img/enoughCP.png}
|
|
|
|
|
\caption[Example of a high resolution control--grid]{A high resolution
|
|
|
|
|
($10 \times 10$) of control--points over a circle. Yellow/green points
|
|
|
|
|
contribute to the parametrization, red points don't.\newline
|
|
|
|
|
An Example--point (blue) is solely determined by the position of the green
|
|
|
|
|
control--points.}
|
|
|
|
|
\label{fig:enoughCP}
|
|
|
|
|
\end{figure}
|
|
|
|
|
|
2017-10-10 15:45:58 +00:00
|
|
|
|
One would normally think that the more control--points you add, the
|
|
|
|
|
better the result will be, but this is not the case for our B--Splines.
|
2017-10-27 12:31:55 +00:00
|
|
|
|
Given any point \(\vec{p}\) only the \(2 \cdot (d-1)\) control--points
|
2017-10-10 15:45:58 +00:00
|
|
|
|
contribute to the parametrization of that point\footnote{Normally these
|
2017-10-29 20:44:10 +00:00
|
|
|
|
are \(d-1\) to each side, but at the boundaries border points get used
|
|
|
|
|
multiple times to meet the number of points required}. This means,
|
|
|
|
|
that a high resolution can have many control--points that are not
|
2017-10-10 15:45:58 +00:00
|
|
|
|
contributing to any point on the surface and are thus completely
|
|
|
|
|
irrelevant to the solution.
|
|
|
|
|
|
|
|
|
|
We illustrate this phenomenon in figure \ref{fig:enoughCP}, where the
|
2017-10-29 20:44:10 +00:00
|
|
|
|
red central points are not relevant for the parametrization of the
|
2017-10-27 12:31:55 +00:00
|
|
|
|
circle. This leads to artefacts in the deformation--matrix \(\vec{U}\),
|
|
|
|
|
as the columns corresponding to those control--points are \(0\).
|
|
|
|
|
|
2017-10-29 20:44:10 +00:00
|
|
|
|
This also leads to needlessly increased complexity, as the parameters
corresponding to those points will never have any effect, but a naive
algorithm will still try to optimize them, yielding numeric artefacts at
best and non--terminating or ill--defined solutions\footnote{One
example would be when parts of an algorithm depend on the inverse of
the smallest singular value, leading to a division by \(0\).} at
worst.
|
|
|
|
|
|
|
|
|
|
One can of course neglect those columns and their corresponding
|
|
|
|
|
control--points, but this raises the question why they were introduced
|
|
|
|
|
in the first place. We will address this in a special scenario in
|
|
|
|
|
\ref{sec:res:3d:var}.
|
2017-10-10 15:45:58 +00:00
|
|
|
|
|
2017-10-17 15:12:33 +00:00
|
|
|
|
For our tests we chose different uniformly sized grids and added noise
|
2017-10-29 20:44:10 +00:00
|
|
|
|
onto each control--point\footnote{For the special case of the outer
|
|
|
|
|
layer we only applied noise away from the object, so the object is
|
|
|
|
|
still confined in the convex hull of the control--points.} to simulate
|
|
|
|
|
different starting--conditions.
\chapter{\texorpdfstring{Scenarios for testing evolvability--criteria
using
\ac{FFD}}{Scenarios for testing evolvability--criteria using FFD}}\label{scenarios-for-testing-evolvabilitycriteria-using}
|
2017-10-07 23:07:37 +00:00
|
|
|
|
|
2017-10-08 19:08:29 +00:00
|
|
|
|
\label{sec:eval}
|
|
|
|
|
|
2017-10-15 18:00:59 +00:00
|
|
|
|
In our experiments we use the same two testing--scenarios that were
also used by Richter et al.\cite{anrichterEvol} The first scenario
deforms a plane into a shape originally defined by Giannelli et
al.\cite{giannelli2012thb}, where we set up control--points in a
2--dimensional manner and merely deform in the height--coordinate to get
the resulting shape.
|
2017-08-23 19:18:44 +00:00
|
|
|
|
|
2017-10-15 18:00:59 +00:00
|
|
|
|
In the second scenario we increase the degrees of freedom significantly
by using a 3--dimensional control--grid to deform a sphere into a face,
so each control point has three degrees of freedom in contrast to the
first scenario.
|
2017-08-23 19:18:44 +00:00
|
|
|
|
|
2017-10-15 18:00:59 +00:00
|
|
|
|
\section{Test Scenario: 1D Function
|
|
|
|
|
Approximation}\label{test-scenario-1d-function-approximation}
|
2017-08-23 19:18:44 +00:00
|
|
|
|
|
2017-10-15 18:00:59 +00:00
|
|
|
|
In this scenario we used the shape defined by Giannelli et
al.\cite{giannelli2012thb}, which is also used by Richter et
al.\cite{anrichterEvol}, using the same discretization of
\(150 \times 150\) points for a total of \(n = 22\,500\) vertices. The
shape is given by the following definition
|
|
|
|
|
|
|
|
|
|
\begin{equation}
|
2017-10-17 15:12:33 +00:00
|
|
|
|
t(x,y) =
|
2017-10-15 18:00:59 +00:00
|
|
|
|
\begin{cases}
|
|
|
|
|
0.5 \cos(4\pi \cdot q^{0.5}) + 0.5 & q(x,y) < \frac{1}{16},\\
|
|
|
|
|
2(y-x) & 0 < y-x < 0.5,\\
|
|
|
|
|
1 & 0.5 < y - x
|
|
|
|
|
\end{cases}
|
2017-10-25 18:40:51 +00:00
|
|
|
|
\end{equation}
|
|
|
|
|
|
|
|
|
|
with \((x,y) \in [0,2] \times [0,1]\) and
|
2017-10-15 18:00:59 +00:00
|
|
|
|
\(q(x,y)=(x-1.5)^2 + (y-0.5)^2\), which we have visualized in figure
|
|
|
|
|
\ref{fig:1dtarget}.
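To make the definition concrete, the following minimal Python sketch (illustrative only, not part of our implementation) evaluates \(t(x,y)\) on the \(150 \times 150\) discretization; the value outside the listed cases is assumed to be \(0\), which is not stated explicitly in the definition above.

\begin{verbatim}
import numpy as np

def target_height(x, y):
    # piecewise definition of t(x,y); cases are checked in the listed order
    q = (x - 1.5)**2 + (y - 0.5)**2
    if q < 1.0 / 16.0:
        return 0.5 * np.cos(4.0 * np.pi * np.sqrt(q)) + 0.5
    if 0.0 < y - x < 0.5:
        return 2.0 * (y - x)
    if 0.5 < y - x:
        return 1.0
    return 0.0  # assumed value outside the listed cases

# sample the domain [0,2] x [0,1] with 150 x 150 points
xs = np.linspace(0.0, 2.0, 150)
ys = np.linspace(0.0, 1.0, 150)
t = np.array([[target_height(x, y) for y in ys] for x in xs])
\end{verbatim}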
|
|
|
|
|
|
2017-10-17 15:12:33 +00:00
|
|
|
|
\begin{figure}[ht]
|
2017-10-16 20:45:29 +00:00
|
|
|
|
\begin{center}
|
|
|
|
|
\includegraphics[width=0.7\textwidth]{img/1dtarget.png}
|
2017-10-17 15:12:33 +00:00
|
|
|
|
\end{center}
|
2017-10-17 15:34:01 +00:00
|
|
|
|
\caption[The 1D--target--shape]{The target--shape for our 1--dimensional optimization--scenario
|
2017-10-16 20:45:29 +00:00
|
|
|
|
including a wireframe--overlay of the vertices.}
|
|
|
|
|
\label{fig:1dtarget}
|
2017-10-17 15:12:33 +00:00
|
|
|
|
\end{figure}
|
2017-10-16 20:45:29 +00:00
|
|
|
|
|
2017-10-29 20:44:10 +00:00
|
|
|
|
As the starting--plane we used the same shape, but set all
|
2017-10-15 18:00:59 +00:00
|
|
|
|
\(z\)--coordinates to \(0\), yielding a flat plane, which is partially
|
|
|
|
|
already correct.
|
|
|
|
|
|
2017-10-27 12:31:55 +00:00
|
|
|
|
Regarding the \emph{fitness--function} \(\mathrm{f}(\vec{p})\), we use
|
|
|
|
|
the very simple approach of calculating the squared distances for each
|
2017-10-25 18:40:51 +00:00
|
|
|
|
corresponding vertex
|
|
|
|
|
|
|
|
|
|
\begin{equation}
|
2017-10-27 12:31:55 +00:00
|
|
|
|
\mathrm{f}(\vec{p}) = \sum_{i=1}^{n} \|(\vec{Up})_i - t_i\|_2^2 = \|\vec{Up} - \vec{t}\|^2 \rightarrow \min
|
2017-10-25 18:40:51 +00:00
|
|
|
|
\end{equation}
|
|
|
|
|
|
|
|
|
|
where \(t_i\) are the respective target--vertices to the parametrized
source--vertices\footnote{The parametrization is encoded in \(\vec{U}\)
and the initial position of the control--points. See
\ref{sec:ffd:adapt}.} with the current deformation--parameters
\(\vec{p} = (p_1,\dots, p_m)\). We can use this
one--to--one--correspondence because we have exactly the same number of
source-- and target--vertices due to our setup of just flattening the
object.
|
|
|
|
|
|
|
|
|
|
This formula is also the least--squares approximation error, for which we
can compute the analytic solution \(\vec{p^{*}} = \vec{U^+}\vec{t}\),
yielding the correct gradient along which the evolutionary optimizer
should move.
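As a hedged illustration of this relationship, the following Python sketch computes the analytic optimum via the pseudo--inverse and the corresponding fitness; \(\vec{U}\) and \(\vec{t}\) are assumed to be given as plain numpy arrays, independent of our actual implementation.

\begin{verbatim}
import numpy as np

def analytic_solution(U, t):
    # least-squares optimum p* = U^+ t of f(p) = ||U p - t||^2
    return np.linalg.pinv(U) @ t

def fitness(U, p, t):
    # squared distance between deformed source and target vertices
    return np.sum((U @ p - t)**2)

# fitness(U, analytic_solution(U, t), t) gives the reference error
# used as stopping criterion for the evolutionary optimization
\end{verbatim}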
|
|
|
|
|
|
2017-10-01 19:04:04 +00:00
|
|
|
|
\section{Test Scenario: 3D Function
|
|
|
|
|
Approximation}\label{test-scenario-3d-function-approximation}
|
2017-08-23 19:18:44 +00:00
|
|
|
|
|
2017-10-25 18:40:51 +00:00
|
|
|
|
\label{sec:test:3dfa} In contrast to the 1--dimensional scenario before, the
3--dimensional scenario is much more complex --- not only because we
have more degrees of freedom on each control point, but also because
the \emph{fitness--function} we will use has no known analytic solution
and multiple local minima.
|
2017-08-23 19:18:44 +00:00
|
|
|
|
|
2017-10-16 20:45:29 +00:00
|
|
|
|
\begin{figure}[ht]
|
|
|
|
|
\begin{center}
|
2017-10-25 18:40:51 +00:00
|
|
|
|
\includegraphics[width=0.9\textwidth]{img/3dtarget.png}
|
2017-10-16 20:45:29 +00:00
|
|
|
|
\end{center}
|
2017-10-17 15:34:01 +00:00
|
|
|
|
\caption[3D source and target meshes]{\newline
|
2017-10-16 20:45:29 +00:00
|
|
|
|
Left: The sphere we start from with 10\,807 vertices\newline
|
|
|
|
|
Right: The face we want to deform the sphere into with 12\,024 vertices.}
|
|
|
|
|
\label{fig:3dtarget}
|
|
|
|
|
\end{figure}
|
2017-08-23 19:18:44 +00:00
|
|
|
|
|
2017-10-16 20:45:29 +00:00
|
|
|
|
First of all we introduce the setup: We are given a triangulated model
of a sphere consisting of \(10\,807\) vertices that we want to deform
into the target--model of a face with a total of \(12\,024\) vertices.
Both of these models can be seen in figure \ref{fig:3dtarget}.
|
2017-10-16 20:45:29 +00:00
|
|
|
|
|
|
|
|
|
In contrast to the 1D--case we cannot map the source-- and target--vertices in
a one--to--one--correspondence, which we especially need for the
approximation of the fitting--error. Hence we state that the error of
one vertex is the distance to the closest vertex of the respective other
model and sum up the errors from both the source and the target.
|
2017-10-16 20:45:29 +00:00
|
|
|
|
|
2017-10-25 18:40:51 +00:00
|
|
|
|
We therefore define the \emph{fitness--function} to be:
|
|
|
|
|
|
|
|
|
|
\begin{equation}
|
2017-10-27 12:31:55 +00:00
|
|
|
|
\mathrm{f}(\vec{P}) = \frac{1}{n} \underbrace{\sum_{i=1}^n \|\vec{c_T(s_i)} -
|
2017-10-29 20:44:10 +00:00
|
|
|
|
\vec{s_i}\|_2^2}_{\textrm{source--to--target--distance}}
|
2017-10-16 20:45:29 +00:00
|
|
|
|
+ \frac{1}{m} \underbrace{\sum_{i=1}^m \|\vec{c_S(t_i)} -
|
2017-10-29 20:44:10 +00:00
|
|
|
|
\vec{t_i}\|_2^2}_{\textrm{target--to--source--distance}}
|
2017-10-17 15:12:33 +00:00
|
|
|
|
+ \lambda \cdot \textrm{regularization}(\vec{P})
|
2017-10-25 18:40:51 +00:00
|
|
|
|
\label{eq:fit3d}
|
|
|
|
|
\end{equation}
|
|
|
|
|
|
|
|
|
|
where \(\vec{c_T(s_i)}\) denotes the target--vertex that
corresponds to the source--vertex \(\vec{s_i}\) and \(\vec{c_S(t_i)}\)
denotes the source--vertex that corresponds to the target--vertex
\(\vec{t_i}\). Note that the target--vertices are given and fixed by the
target--model of the face we want to deform into, whereas the
source--vertices vary depending on the chosen parameters \(\vec{P}\), as
those get calculated by the previously introduced formula
|
|
|
|
|
\(\vec{S} = \vec{UP}\) with \(\vec{S}\) being the \(n \times 3\)--matrix
|
|
|
|
|
of source--vertices, \(\vec{U}\) the \(n \times m\)--matrix of
|
|
|
|
|
calculated coefficients for the \ac{FFD} --- analog to the 1D case ---
|
|
|
|
|
and finally \(\vec{P}\) being the \(m \times 3\)--matrix of the
|
|
|
|
|
control--grid defining the whole deformation.
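The matching part of this \emph{fitness--function} can be sketched as follows; this is an illustration only, assuming the vertex positions are available as numpy arrays and using a k--d--tree for the closest--vertex lookup (the regularization term is passed in as a separate function).

\begin{verbatim}
import numpy as np
from scipy.spatial import cKDTree

def matching_error(S, T):
    # symmetric closest-vertex error of the fitness-function above,
    # S: (n x 3) source vertices, T: (m x 3) target vertices
    d_st, _ = cKDTree(T).query(S)   # source-to-target distances
    d_ts, _ = cKDTree(S).query(T)   # target-to-source distances
    return np.mean(d_st**2) + np.mean(d_ts**2)

def fitness(U, P, T, lam, regularization):
    # deformed source S = U P plus weighted regularization term
    S = U @ P
    return matching_error(S, T) + lam * regularization(P)
\end{verbatim}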
|
|
|
|
|
|
2017-10-29 20:44:10 +00:00
|
|
|
|
As regularization--term we add a weighted Laplacian of the deformation
|
2017-10-17 15:12:33 +00:00
|
|
|
|
that has been used before by Aschenbach et
|
|
|
|
|
al.\cite[Section 3.2]{aschenbach2015} on similar models and was shown to
|
2017-10-25 18:40:51 +00:00
|
|
|
|
lead to a more precise fit. The Laplacian
|
|
|
|
|
|
|
|
|
|
\begin{equation}
|
2017-10-28 18:54:05 +00:00
|
|
|
|
\mathrm{regularization}(\vec{P}) = \frac{1}{\sum_i A_i} \sum_{i=1}^n A_i \cdot \left( \sum_{\vec{s}_j \in \mathcal{N}(\vec{s}_i)} w_j \cdot \|\Delta \vec{s}_j - \Delta \vec{s}_i\|^2 \right)
|
2017-10-25 18:40:51 +00:00
|
|
|
|
\label{eq:reg3d}
|
|
|
|
|
\end{equation}
|
|
|
|
|
|
|
|
|
|
is determined by the cotangent--weighted displacement \(w_j\) of the
vertices \(\mathcal{N}(\vec{s}_i)\) connected to \(\vec{s}_i\), and \(A_i\) is the
Voronoi--area of the corresponding vertex \(\vec{s_i}\). We leave out
the \(\vec{R}_i\)--term from the original paper as our deformation is
merely linear.
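A minimal sketch of this regularization term is given below; it assumes that the neighbourhoods \(\mathcal{N}(\vec{s}_i)\), the cotangent weights \(w_j\) and the Voronoi--areas \(A_i\) have already been computed elsewhere, as their derivation is not repeated here.

\begin{verbatim}
import numpy as np

def laplacian_regularization(delta, neighbours, w, A):
    # delta:      (n x 3) displacements of the source vertices
    # neighbours: list of index lists, neighbours[i] = N(s_i)
    # w:          list of weight lists, cotangent weights per neighbour
    # A:          (n,) Voronoi areas A_i
    total = 0.0
    for i, nbrs in enumerate(neighbours):
        inner = sum(w_ij * np.sum((delta[j] - delta[i])**2)
                    for j, w_ij in zip(nbrs, w[i]))
        total += A[i] * inner
    return total / np.sum(A)
\end{verbatim}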
|
|
|
|
|
|
|
|
|
|
This regularization--weight gives us a measure of stiffness for the
material, which we influence via the \(\lambda\)--coefficient to
start out with a stiff material that becomes more flexible with each
iteration. As a side--effect this also limits the effects of
overaggressive movement of the control--points in the beginning of the
fitting process and thus should limit the generation of ill--defined
grids mentioned in section \ref{sec:impl:grid}.
\chapter{Evaluation of Scenarios}\label{evaluation-of-scenarios}
|
2017-08-23 19:18:44 +00:00
|
|
|
|
|
2017-10-08 19:08:29 +00:00
|
|
|
|
\label{sec:res}
|
|
|
|
|
|
2017-10-24 11:22:20 +00:00
|
|
|
|
To compare our results to the ones given by Richter et
al.\cite{anrichterEvol}, we also use Spearman's rank correlation
coefficient. In contrast to other popular coefficients, such as the Pearson
correlation coefficient, which measures a linear relationship between
variables, Spearman's coefficient assesses \glqq how well an
arbitrary monotonic function can describe the relationship between two
variables, without making any assumptions about the frequency
distribution of the variables\grqq\cite{hauke2011comparison}.
|
|
|
|
|
|
|
|
|
|
As we don't have any prior knowledge of whether any of the criteria is linear
and we are just interested in a monotonic relation between the criteria
and their predictive power, Spearman's coefficient seems to fit our
scenario best and was also used before by Richter et
al.\cite{anrichterEvol}
|
2017-10-24 11:22:20 +00:00
|
|
|
|
|
|
|
|
|
For the interpretation of these values we follow the same classification
used in \cite{anrichterEvol}, based on \cite{weir2015spearman}: The
coefficient intervals \(r_S \in [0,0.2[\), \([0.2,0.4[\), \([0.4,0.6[\),
\([0.6,0.8[\), and \([0.8,1]\) are classified as \emph{very weak},
\emph{weak}, \emph{moderate}, \emph{strong} and \emph{very strong}. We
interpret p--values smaller than \(0.01\) as \emph{significant} and cut
off the precision of p--values after four decimal digits (thus often
having a p--value of \(0\) given for p--values \(< 10^{-4}\)).
|
|
|
|
|
|
|
|
|
|
As we are looking for anti--correlation (i.e.~our criterion should be
|
|
|
|
|
maximized indicating a minimal result in --- for example --- the
|
|
|
|
|
reconstruction--error) instead of correlation we flip the sign of the
|
|
|
|
|
correlation--coefficient for readability and to have the
|
|
|
|
|
correlation--coefficients be in the classification--range given above.
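For clarity, the reported statistic therefore amounts to the following small Python sketch (names are illustrative), using the Spearman implementation of scipy:

\begin{verbatim}
from scipy.stats import spearmanr

def reported_correlation(criterion, error):
    # negate the coefficient, as a high criterion value should
    # predict a low error (anti-correlation)
    r_s, p_value = spearmanr(criterion, error)
    return -r_s, p_value
\end{verbatim}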
|
|
|
|
|
|
2017-10-29 18:02:31 +00:00
|
|
|
|
For the evolutionary optimization we employ the \acf{CMA--ES} of the
shark3.1 library \cite{shark08}, as this algorithm was used by
\cite{anrichterEvol} as well. We leave the parameters at their sensible
defaults as further explained in
\cite[Appendix~A: Table~1]{hansen2016cma}.
|
2017-10-24 11:22:20 +00:00
|
|
|
|
|
2017-10-25 18:40:51 +00:00
|
|
|
|
\section{Procedure: 1D Function
|
|
|
|
|
Approximation}\label{procedure-1d-function-approximation}
|
|
|
|
|
|
|
|
|
|
\label{sec:proc:1d}
|
|
|
|
|
|
|
|
|
|
For our setup we first compute the coefficients of the
|
2017-10-29 20:44:10 +00:00
|
|
|
|
deformation--matrix and use the formulas for \emph{variability} and
|
2017-10-25 18:40:51 +00:00
|
|
|
|
\emph{regularity} to get our predictions. Afterwards we solve the
|
|
|
|
|
problem analytically to get the (normalized) correct gradient that we
|
2017-10-29 20:44:10 +00:00
|
|
|
|
use as a guess for the \emph{improvement potential}. To further test the
|
|
|
|
|
\emph{improvement potential} we also consider a distorted gradient
|
|
|
|
|
\(\vec{g}_{\mathrm{d}}\): \[
|
2017-10-28 18:54:05 +00:00
|
|
|
|
\vec{g}_{\mathrm{d}} = \frac{\mu \vec{g}_{\mathrm{c}} + (1-\mu)\mathbb{1}}{\|\mu \vec{g}_{\mathrm{c}} + (1-\mu) \mathbb{1}\|}
|
2017-10-25 18:40:51 +00:00
|
|
|
|
\] where \(\mathbb{1}\) is the vector consisting of \(1\) in every
|
2017-10-28 18:54:05 +00:00
|
|
|
|
dimension, \(\vec{g}_\mathrm{c} = \vec{p^{*}} - \vec{p}\) is the
|
|
|
|
|
calculated correct gradient, and \(\mu\) is used to blend between
|
|
|
|
|
\(\vec{g}_\mathrm{c}\) and \(\mathbb{1}\). As we always start with
parameters \(\vec{p} = \mathbb{0}\), this means we can shorten the definition
of \(\vec{g}_\mathrm{c}\) to \(\vec{g}_\mathrm{c} = \vec{p^{*}}\).
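The construction of the distorted gradient can be sketched as follows (illustrative Python, with \(\vec{g}_\mathrm{c}\) given as a numpy array):

\begin{verbatim}
import numpy as np

def distorted_gradient(g_c, mu):
    # blend the correct gradient with the all-ones vector and normalize
    blended = mu * g_c + (1.0 - mu) * np.ones_like(g_c)
    return blended / np.linalg.norm(blended)

# mu = 1.0 reproduces the normalized correct gradient,
# mu = 0.0 yields a direction carrying no information about the problem
\end{verbatim}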
|
2017-10-25 18:40:51 +00:00
|
|
|
|
|
|
|
|
|
\begin{figure}[ht]
|
|
|
|
|
\begin{center}
|
|
|
|
|
\includegraphics[width=\textwidth]{img/example1d_grid.png}
|
|
|
|
|
\end{center}
|
|
|
|
|
\caption[Example of a 1D--grid]{\newline Left: A regular $7 \times 4$--grid\newline Right: The same grid after a
|
|
|
|
|
random distortion to generate a testcase.}
|
|
|
|
|
\label{fig:example1d_grid}
|
|
|
|
|
\end{figure}
|
|
|
|
|
|
|
|
|
|
We then set up a regular 2--dimensional grid around the object with the
desired grid--resolutions. To generate a testcase we then move the
grid--vertices randomly inside the x--y--plane. As self--intersecting
grids get tricky to solve with our implemented Newton's method (see
section \ref{3dffd}) we avoid the generation of such self--intersecting
grids for our testcases.
|
2017-10-25 18:40:51 +00:00
|
|
|
|
|
2017-10-28 18:54:05 +00:00
|
|
|
|
To achieve that we generated a Gaussian--distributed number with
\(\mu = 0, \sigma=0.25\) and clamped it to the range \([-0.25,0.25]\).
We chose such an \(r \in [-0.25,0.25]\) per dimension and moved the
control--points by that factor towards their respective
neighbours\footnote{Note: On the edges this displacement is only applied
outwards by flipping the sign of \(r\), if appropriate.}.
|
|
|
|
|
|
|
|
|
|
In other words we set
|
|
|
|
|
|
|
|
|
|
\begin{equation*}
|
|
|
|
|
p_i =
|
|
|
|
|
\begin{cases}
|
|
|
|
|
p_i + (p_i - p_{i-1}) \cdot r, & \textrm{if } r \textrm{ negative} \\
|
|
|
|
|
p_i + (p_{i+1} - p_i) \cdot r, & \textrm{if } r \textrm{ positive}
|
|
|
|
|
\end{cases}
|
|
|
|
|
\end{equation*}
|
|
|
|
|
|
|
|
|
|
in each dimension separately.
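A 1D sketch of this testcase generation is given below; the edge handling follows our reading of the footnote above (edge points are only displaced outwards), and the 2D grid applies this independently per dimension.

\begin{verbatim}
import numpy as np

def perturb_row(p, sigma=0.25, clamp=0.25):
    # p: ordered 1D coordinates of one row of control-points
    out = p.copy()
    for i in range(len(p)):
        r = np.clip(np.random.normal(0.0, sigma), -clamp, clamp)
        if i == 0:                 # left edge: only move outwards
            out[i] = p[i] - abs(r) * (p[i + 1] - p[i])
        elif i == len(p) - 1:      # right edge: only move outwards
            out[i] = p[i] + abs(r) * (p[i] - p[i - 1])
        elif r < 0:                # move towards the left neighbour
            out[i] = p[i] + (p[i] - p[i - 1]) * r
        else:                      # move towards the right neighbour
            out[i] = p[i] + (p[i + 1] - p[i]) * r
    return out
\end{verbatim}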
|
2017-10-25 18:40:51 +00:00
|
|
|
|
|
|
|
|
|
An example of such a testcase can be seen for a \(7 \times 4\)--grid in
|
|
|
|
|
figure \ref{fig:example1d_grid}.
|
|
|
|
|
|
2017-10-24 11:22:20 +00:00
|
|
|
|
\section{Results of 1D Function
|
|
|
|
|
Approximation}\label{results-of-1d-function-approximation}
|
|
|
|
|
|
|
|
|
|
In the case of our 1D--optimization--problem, we have the luxury of
knowing the analytical solution to the given problem--set. We use this
to experimentally evaluate the quality criteria we introduced before. As
an evolutionary optimization is partially a random process, we use the
analytical solution as a stopping--criterion. We measure the convergence
speed as the number of iterations the evolutionary algorithm needed to get
within \(1.05\) times the error of the optimal solution.
|
2017-10-24 11:22:20 +00:00
|
|
|
|
|
|
|
|
|
We used different regular grids that we manipulated as explained in
section \ref{sec:proc:1d} with different numbers of control--points. As
the number of control--points in our grids has to be a product of two
integers, we compared a \(5 \times 5\)--grid with \(25\) control--points
to a \(4 \times 7\)-- and a \(7 \times 4\)--grid with \(28\)
control--points each. This was done to
measure the impact an \glqq improper\grqq ~setup could have and how
well this is reflected in the criteria we are examining.
|
2017-10-24 11:22:20 +00:00
|
|
|
|
|
|
|
|
|
Additionally we also measured the effect of increasing the total
|
|
|
|
|
resolution of the grid by taking a closer look at \(5 \times 5\),
|
|
|
|
|
\(7 \times 7\) and \(10 \times 10\) grids.
|
|
|
|
|
|
2017-10-27 12:31:55 +00:00
|
|
|
|
\subsection{Variability}\label{variability-1}
|
|
|
|
|
|
|
|
|
|
\begin{figure}[tbh]
|
2017-10-24 11:22:20 +00:00
|
|
|
|
\centering
|
|
|
|
|
\includegraphics[width=0.7\textwidth]{img/evolution1d/variability_boxplot.png}
|
|
|
|
|
\caption[1D Fitting Errors for various grids]{The squared error for the various
|
|
|
|
|
grids we examined.\newline
|
|
|
|
|
Note that $7 \times 4$ and $4 \times 7$ have the same number of control--points.}
|
2017-10-25 18:40:51 +00:00
|
|
|
|
\label{fig:1dvar}
|
2017-10-24 11:22:20 +00:00
|
|
|
|
\end{figure}
|
|
|
|
|
|
2017-10-29 20:44:10 +00:00
|
|
|
|
\emph{Variability} should characterize the potential for design space
|
2017-10-24 11:22:20 +00:00
|
|
|
|
exploration and is defined in terms of the normalized rank of the
|
|
|
|
|
deformation matrix \(\vec{U}\):
|
|
|
|
|
\(V(\vec{U}) := \frac{\textrm{rank}(\vec{U})}{n}\), where \(n\) is the
number of vertices. As all our tested matrices had a constant rank
(being \(m = x \cdot y\) for an \(x \times y\) grid), we have merely
plotted the errors in the box plot in figure \ref{fig:1dvar}.
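The computation of this criterion itself is straightforward; a minimal sketch:

\begin{verbatim}
import numpy as np

def variability(U):
    # normalized rank of the (n x m) deformation matrix U
    return np.linalg.matrix_rank(U) / U.shape[0]
\end{verbatim}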
|
2017-10-24 11:22:20 +00:00
|
|
|
|
|
|
|
|
|
It is also noticeable that although the \(7 \times 4\) and
\(4 \times 7\) grids have a higher \emph{variability}, they do not perform
better than the \(5 \times 5\) grid. Also the \(7 \times 4\) and
\(4 \times 7\) grids differ distinctly from each other with a
mean\(\pm\)sigma of \(233.09 \pm 12.32\) for the former and
\(286.32 \pm 22.36\) for the latter, although they have the same number
of control--points. This is an indication of the impact a proper or
improper grid--setup can have. We do not draw scientific conclusions
from these findings, as more research on non--squared grids seems
necessary.
|
2017-10-24 11:22:20 +00:00
|
|
|
|
|
|
|
|
|
Leaving the issue of the grid--layout aside we focused on grids having
the same number of control--points in every dimension. For the
|
|
|
|
|
\(5 \times 5\), \(7 \times 7\) and \(10 \times 10\) grids we found a
|
|
|
|
|
\emph{very strong} correlation (\(-r_S = 0.94, p = 0\)) between the
|
2017-10-29 20:44:10 +00:00
|
|
|
|
\emph{variability} and the evolutionary error.
|
2017-10-24 11:22:20 +00:00
|
|
|
|
|
|
|
|
|
\subsection{Regularity}\label{regularity-1}
|
|
|
|
|
|
2017-10-27 12:31:55 +00:00
|
|
|
|
\begin{figure}[tbh]
|
2017-10-25 18:40:51 +00:00
|
|
|
|
\centering
|
|
|
|
|
\includegraphics[width=\textwidth]{img/evolution1d/55_to_1010_steps.png}
|
2017-10-29 20:44:10 +00:00
|
|
|
|
\caption[Improvement potential and regularity against iterations]{\newline
|
|
|
|
|
Left: \emph{Improvement potential} against number of iterations until convergence\newline
Right: \emph{Regularity} against number of iterations until convergence\newline
|
2017-10-25 18:40:51 +00:00
|
|
|
|
Coloured by their grid--resolution, both with a linear fit over the whole
|
|
|
|
|
dataset.}
|
|
|
|
|
\label{fig:1dreg}
|
|
|
|
|
\end{figure}
|
|
|
|
|
|
|
|
|
|
\begin{table}[b]
|
2017-10-24 11:22:20 +00:00
|
|
|
|
\centering
|
|
|
|
|
\begin{tabular}{c|c|c|c|c}
|
|
|
|
|
$5 \times 5$ & $7 \times 4$ & $4 \times 7$ & $7 \times 7$ & $10 \times 10$\\
|
|
|
|
|
\hline
|
|
|
|
|
$0.28$ ($0.0045$) & \textcolor{red}{$0.21$} ($0.0396$) & \textcolor{red}{$0.1$} ($0.3019$) & \textcolor{red}{$0.01$} ($0.9216$) & \textcolor{red}{$0.01$} ($0.9185$)
|
|
|
|
|
\end{tabular}
|
2017-10-30 08:09:44 +00:00
|
|
|
|
\caption[Correlation 1D regularity against iterations]{Negated Spearman's correlation (and p--values)
between \emph{regularity} and number of iterations for the 1D function approximation
|
2017-10-25 18:40:51 +00:00
|
|
|
|
problem.
|
|
|
|
|
\newline Note: Not significant results are marked in \textcolor{red}{red}.
|
2017-10-24 11:22:20 +00:00
|
|
|
|
}
|
|
|
|
|
\label{tab:1dreg}
|
|
|
|
|
\end{table}
|
|
|
|
|
|
2017-10-29 20:44:10 +00:00
|
|
|
|
\emph{Regularity} should correspond to the convergence speed (measured
in iteration--steps of the evolutionary algorithm), and is computed as the
inverse of the condition number \(\kappa(\vec{U})\) of the deformation--matrix.
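A minimal sketch of this computation via the singular values of \(\vec{U}\):

\begin{verbatim}
import numpy as np

def regularity(U):
    # inverse condition number: ratio of smallest to largest singular value
    sigma = np.linalg.svd(U, compute_uv=False)  # sorted in descending order
    return sigma[-1] / sigma[0]
\end{verbatim}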
|
|
|
|
|
|
|
|
|
|
As can be seen from table \ref{tab:1dreg}, we could only show a
\emph{weak} correlation in the case of a \(5 \times 5\) grid. As we
increase the number of control--points the correlation gets worse until
it is completely random within a single dataset. Taking all presented
datasets into account we even get a \emph{strong} correlation of
\(- r_S = -0.72, p = 0\), which is opposed to our expectations.
|
|
|
|
|
|
|
|
|
|
To explain this discrepancy we took a closer look at what caused these
high numbers of iterations. In figure \ref{fig:1dreg} we also plotted the
\emph{improvement potential} against the steps next to the
\emph{regularity}--plot. Our theory is that the \emph{very strong}
correlation (\(-r_S = -0.82, p=0\)) between \emph{improvement potential}
and number of iterations hints that the employed algorithm simply takes
longer to converge on a better solution (as seen in figures
\ref{fig:1dvar} and \ref{fig:1dimp}), offsetting any gain the
regularity--measurement could achieve.
|
2017-10-24 11:22:20 +00:00
|
|
|
|
|
|
|
|
|
\subsection{Improvement Potential}\label{improvement-potential-1}
|
2017-08-23 19:18:44 +00:00
|
|
|
|
|
2017-10-24 11:22:20 +00:00
|
|
|
|
\begin{figure}[ht]
|
|
|
|
|
\centering
|
|
|
|
|
\includegraphics[width=0.8\textwidth]{img/evolution1d/55_to_1010_improvement-vs-evo-error.png}
|
2017-10-29 20:44:10 +00:00
|
|
|
|
\caption[Correlation 1D Improvement vs. Error]{\emph{Improvement potential} plotted
against the error yielded by the evolutionary optimization for different
grid--resolutions.}
|
|
|
|
|
\label{fig:1dimp}
|
2017-10-01 19:04:04 +00:00
|
|
|
|
\end{figure}
|
|
|
|
|
|
2017-10-29 20:44:10 +00:00
|
|
|
|
The \emph{improvement potential} should correlate with the quality of the
|
|
|
|
|
fitting--result. We plotted the results for the tested grid--sizes
|
2017-10-28 18:54:05 +00:00
|
|
|
|
\(5 \times 5\), \(7 \times 7\) and \(10 \times 10\) in figure
|
|
|
|
|
\ref{fig:1dimp}. We tested the \(4 \times 7\) and \(7 \times 4\) grids
|
|
|
|
|
as well, but omitted them from the plot.
|
2017-10-25 18:40:51 +00:00
|
|
|
|
|
2017-10-28 18:54:05 +00:00
|
|
|
|
Additionally we tested the results for the distorted gradient described in
section \ref{sec:proc:1d} with \(\mu\)--values of \(0.25\), \(0.5\), \(0.75\),
and \(1.0\) for the \(5 \times 5\) grid and with a \(\mu\)--value of
\(0.5\) for all other cases.
|
|
|
|
|
|
|
|
|
|
All results show the identical \emph{very strong} and \emph{significant}
|
|
|
|
|
correlation with a Spearman--coefficient of \(- r_S = 1.0\) and p--value
|
|
|
|
|
of \(0\).
|
|
|
|
|
|
|
|
|
|
These results indicate that \(\|\mathbb{1} - \vec{U}\vec{U}^{+}\|_F\)
is close to \(0\), reducing the impact of any kind of gradient.
Nevertheless, the improvement potential seems to be suited to make
educated guesses about the quality of a fit, even lacking an exact
gradient.
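This can be checked directly with a small sketch; here we read \(\mathbb{1}\) in the expression above as the identity matrix, which is an interpretation on our part.

\begin{verbatim}
import numpy as np

def projector_defect(U):
    # Frobenius norm of (I - U U^+); a value close to 0 means the
    # chosen gradient has little influence on the improvement potential
    n = U.shape[0]
    return np.linalg.norm(np.eye(n) - U @ np.linalg.pinv(U), ord='fro')
\end{verbatim}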
|
2017-10-25 18:40:51 +00:00
|
|
|
|
|
|
|
|
|
\section{Procedure: 3D Function
|
|
|
|
|
Approximation}\label{procedure-3d-function-approximation}
|
|
|
|
|
|
|
|
|
|
\label{sec:proc:3dfa}
|
|
|
|
|
|
|
|
|
|
As explained in section \ref{sec:test:3dfa} in detail, we do not know
|
|
|
|
|
the analytical solution to the global optimum. Additionally we have the
|
|
|
|
|
problem of finding the right correspondences between the original
|
|
|
|
|
sphere--model and the target--model, as they consist of \(10\,807\) and
|
|
|
|
|
\(12\,024\) vertices respectively, so we cannot make a
|
|
|
|
|
one--to--one--correspondence between them as we did in the
|
|
|
|
|
one--dimensional case.
|
|
|
|
|
|
|
|
|
|
Initially we set up the correspondences \(\vec{c_T(\dots)}\) and
\(\vec{c_S(\dots)}\) to be the respectively closest vertices of the
other model. We then calculate the analytical solution given these
correspondences via \(\vec{P^{*}} = \vec{U^+}\vec{T}\), and also use this
first solution as a guessed gradient for the calculation of the
\emph{improvement potential}, as the optimal solution is not known. We
then let the evolutionary algorithm run until it is within \(1.05\) times the
error of this solution and afterwards recalculate the correspondences
\(\vec{c_T(\dots)}\) and \(\vec{c_S(\dots)}\).
|
|
|
|
|
|
|
|
|
|
\begin{figure}[ht]
|
|
|
|
|
\begin{center}
|
|
|
|
|
\includegraphics[width=\textwidth]{img/example3d_grid.png}
|
|
|
|
|
\end{center}
|
|
|
|
|
\caption[Example of a 3D--grid]{\newline Left: The 3D--setup with a $4\times
4\times 4$--grid.\newline Right: The same grid after adding noise to the
control--points.}
|
|
|
|
|
\label{fig:setup3d}
|
|
|
|
|
\end{figure}
|
|
|
|
|
|
|
|
|
|
For the next step we then halve the regularization--impact \(\lambda\)
|
|
|
|
|
(starting at \(1\)) of our \emph{fitness--function} (\ref{eq:fit3d}) and
|
|
|
|
|
calculate the next incremental solution
|
2017-10-27 12:31:55 +00:00
|
|
|
|
\(\vec{P^{*}} = \vec{U^+}\vec{T}\) with the updated correspondences
|
|
|
|
|
(again, mapping each vertex to its closest neighbour in the respective
|
|
|
|
|
other model) to get our next target--error. We repeat this process as
long as the target--error keeps decreasing and use the number of these
iterations as a measure of the convergence speed. As the resulting
evolutionary error without regularization is in the numeric range of
\(\approx 100\), whereas the regularization is numerically
\(\approx 7000\), we need at least \(10\) to \(15\) iterations until the
regularization--effect wears off.
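One step of this reference--solution update can be sketched as follows; the sketch only covers the correspondence update and the pseudo--inverse solve (the halving of \(\lambda\) drives the evolutionary runs in between and is omitted here), and it reuses the \texttt{matching\_error} helper sketched earlier.

\begin{verbatim}
import numpy as np
from scipy.spatial import cKDTree

def next_target_error(U, S, T):
    # rebuild correspondences as closest target vertices and solve
    # P* = U^+ T' for the matched targets T'
    _, idx = cKDTree(T).query(S)   # closest target vertex per source vertex
    T_matched = T[idx]             # updated correspondences c_T(s_i)
    P_star = np.linalg.pinv(U) @ T_matched
    S_new = U @ P_star
    return S_new, matching_error(S_new, T)
\end{verbatim}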
|
2017-10-25 18:40:51 +00:00
|
|
|
|
|
|
|
|
|
The grid we use for our experiments is very coarse due to
computational limitations. We are not interested in a good
reconstruction, but in an estimate of whether the mentioned evolvability--criteria
are good predictors.
|
|
|
|
|
|
|
|
|
|
In figure \ref{fig:setup3d} we show an example setup of the scene with a
\(4\times 4\times 4\)--grid. As in the 1--dimensional scenario
before, we create a regular grid and move the control--points in the
exact same random manner between their neighbours as described in
section \ref{sec:proc:1d}, but in three instead of two
dimensions\footnote{Again, we flip the signs for the edges, if necessary,
to keep the object inside the convex hull.}.
|
2017-10-25 18:40:51 +00:00
|
|
|
|
|
|
|
|
|
\begin{figure}[!htb]
|
|
|
|
|
\includegraphics[width=\textwidth]{img/3d_grid_resolution.png}
|
|
|
|
|
\caption[Different resolution of 3D grids]{\newline
|
|
|
|
|
Left: A $7 \times 4 \times 4$ grid suited to better deform into facial
|
|
|
|
|
features.\newline
|
|
|
|
|
Right: A $4 \times 4 \times 7$ grid that we expect to perform worse.}
|
|
|
|
|
\label{fig:3dgridres}
|
|
|
|
|
\end{figure}
|
|
|
|
|
|
|
|
|
|
As is clearly visible from figure \ref{fig:3dgridres}, the target--model
|
|
|
|
|
has many vertices in the facial area, at the ears and in the
|
2017-10-29 20:44:10 +00:00
|
|
|
|
neck--region. Therefore we chose to increase the grid--resolutions for
|
2017-10-25 18:40:51 +00:00
|
|
|
|
our tests in two different dimensions and see how well the criteria
|
2017-10-29 20:44:10 +00:00
|
|
|
|
predict a suboptimal placement of these control--points.
|
2017-10-25 18:40:51 +00:00
|
|
|
|
|
2017-10-01 19:04:04 +00:00
|
|
|
|
\section{Results of 3D Function
|
|
|
|
|
Approximation}\label{results-of-3d-function-approximation}
|
|
|
|
|
|
2017-10-25 18:40:51 +00:00
|
|
|
|
In the 3D--approximation we tried to further evaluate the impact of
the grid--layout on the overall criteria. As the target--model has many
vertices concentrated in the facial area we start from a
\(4 \times 4 \times 4\) grid and only increase the number of
control--points in one dimension, yielding resolutions of
\(7 \times 4 \times 4\) and \(4 \times 4 \times 7\) respectively. We
visualized those two grids in figure \ref{fig:3dgridres}.
|
|
|
|
|
|
|
|
|
|
To evaluate the performance of the evolvability--criteria we also tested
the more neutral resolutions of \(4 \times 4 \times 4\),
\(5 \times 5 \times 5\), and \(6 \times 6 \times 6\) --- similar to the
1D--setup.
|
|
|
|
|
|
|
|
|
|
\begin{figure}[ht]
|
|
|
|
|
\centering
|
|
|
|
|
\includegraphics[width=0.7\textwidth]{img/evolution3d/variability_boxplot.png}
|
|
|
|
|
\caption[3D Fitting Errors for various grids]{The fitting error for the various
|
|
|
|
|
grids we examined.\newline
|
|
|
|
|
Note that the number of control--points is a product of the resolution, so $X
|
|
|
|
|
\times 4 \times 4$ and $4 \times 4 \times X$ have the same number of
|
|
|
|
|
control--points.}
|
|
|
|
|
\label{fig:3dvar}
|
2017-10-16 20:45:29 +00:00
|
|
|
|
\end{figure}
|
2017-10-01 19:04:04 +00:00
|
|
|
|
|
2017-10-25 18:40:51 +00:00
|
|
|
|
\subsection{Variability}\label{variability-2}
|
|
|
|
|
|
2017-10-27 12:31:55 +00:00
|
|
|
|
\label{sec:res:3d:var}
|
|
|
|
|
|
2017-10-25 18:40:51 +00:00
|
|
|
|
\begin{table}[tbh]
|
|
|
|
|
\centering
|
|
|
|
|
\begin{tabular}{c|c|c|c}
|
|
|
|
|
$4 \times 4 \times \mathrm{X}$ & $\mathrm{X} \times 4 \times 4$ & $\mathrm{Y} \times \mathrm{Y} \times \mathrm{Y}$ & all \\
|
|
|
|
|
\hline
|
|
|
|
|
0.89 (0) & 0.9 (0) & 0.91 (0) & 0.94 (0)
|
|
|
|
|
\end{tabular}
|
2017-10-29 20:44:10 +00:00
|
|
|
|
\caption[Correlation between variability and fitting error for 3D]{Correlation
between \emph{variability} and fitting error for the 3D fitting scenario.\newline
Displayed are the negated Spearman coefficients with the corresponding p--values
in brackets for three cases of increasing \emph{variability} ($\mathrm{X} \in [4,5,7],
|
2017-10-25 18:40:51 +00:00
|
|
|
|
\mathrm{Y} \in [4,5,6]$).
|
|
|
|
|
\newline Note: Not significant results are marked in \textcolor{red}{red}.}
|
|
|
|
|
\label{tab:3dvar}
|
|
|
|
|
\end{table}
|
|
|
|
|
|
|
|
|
|
Similar to the 1D case all our tested matrices had a constant rank
|
|
|
|
|
(being \(m = x \cdot y \cdot z\) for a \(x \times y \times z\) grid), so
|
2017-10-29 18:02:31 +00:00
|
|
|
|
we again have merely plotted the errors in the box plot in figure
|
2017-10-25 18:40:51 +00:00
|
|
|
|
\ref{fig:3dvar}.
|
|
|
|
|
|
|
|
|
|
As expected the \(\mathrm{X} \times 4 \times 4\) grids performed
|
|
|
|
|
slightly better than their \(4 \times 4 \times \mathrm{X}\) counterparts
|
|
|
|
|
with a mean\(\pm\)sigma of \(101.25 \pm 7.45\) to \(102.89 \pm 6.74\)
|
|
|
|
|
for \(\mathrm{X} = 5\) and \(85.37 \pm 7.12\) to \(89.22 \pm 6.49\) for
|
|
|
|
|
\(\mathrm{X} = 7\).
|
|
|
|
|
|
|
|
|
|
Interestingly both variants end up closer in terms of fitting error than
|
|
|
|
|
we anticipated, which shows that the evolutionary algorithm we employed
|
|
|
|
|
is capable of correcting a purposefully created \glqq bad\grqq ~grid.
|
|
|
|
|
This also confirms that in our cases the number of control--points is
more important for quality than their placement, which is captured by
the \emph{variability} via the rank of the deformation--matrix.
|
|
|
|
|
|
|
|
|
|
Overall the correlations between \emph{variability} and fitness--error
were \emph{significant} and \emph{very strong} in
all our tests. The detailed correlation--coefficients are given in table
\ref{tab:3dvar} alongside their p--values.
|
|
|
|
|
|
|
|
|
|
As introduced in section \ref{sec:impl:grid} and visualized in figure
\ref{fig:enoughCP}, we know that not all control--points have to
necessarily contribute to the parametrization of our 3D--model. Because
we are starting from a sphere, some control--points are too far away
from the surface to contribute to the deformation at all.
|
|
|
|
|
|
|
|
|
|
One can already see in 2D in figure \ref{fig:enoughCP} that this effect
starts with a regular \(9 \times 9\) grid on a perfect circle. To make
|
|
|
|
|
sure we observe this, we evaluated the \emph{variability} for 100
|
|
|
|
|
randomly moved \(10 \times 10 \times 10\) grids on the sphere we start
|
|
|
|
|
out with.
|
2017-10-25 18:40:51 +00:00
|
|
|
|
|
|
|
|
|
\begin{figure}[hbt]
|
|
|
|
|
\centering
|
|
|
|
|
\includegraphics[width=0.8\textwidth]{img/evolution3d/variability2_boxplot.png}
|
|
|
|
|
\caption[Histogram of ranks of high--resolution deformation--matrices]{
|
2017-10-27 12:31:55 +00:00
|
|
|
|
Histogram of ranks of various $10 \times 10 \times 10$ grids with $1000$
control--points each, showing in this case how many control--points are actually
used in the calculations.
|
2017-10-25 18:40:51 +00:00
|
|
|
|
}
|
|
|
|
|
\label{fig:histrank3d}
|
2017-10-01 19:04:04 +00:00
|
|
|
|
\end{figure}
|
|
|
|
|
|
2017-10-29 20:44:10 +00:00
|
|
|
|
As the \emph{variability} is defined by
|
|
|
|
|
\(\frac{\mathrm{rank}(\vec{U})}{n}\) we can easily recover the rank of
|
|
|
|
|
the deformation--matrix \(\vec{U}\). The results are shown in the
|
|
|
|
|
histogram in figure \ref{fig:histrank3d}. Especially in the centre of
|
|
|
|
|
the sphere and in the corners of our grid we effectively lose
|
|
|
|
|
control--points for our parametrization.
|
2017-10-25 18:40:51 +00:00
|
|
|
|
|
|
|
|
|
This of course yields a worse error than if those control--points were
put to use, and one should expect a loss in quality, evident in a
higher reconstruction--error, compared to a grid where they are used.
Sadly we could not run an in--depth test on this due to computational
limitations.
|
|
|
|
|
|
2017-10-29 20:44:10 +00:00
|
|
|
|
Nevertheless this hints at the notion that \emph{variability} is a good
measure for the overall quality of a fit.
|
|
|
|
|
|
|
|
|
|
\subsection{Regularity}\label{regularity-2}
|
|
|
|
|
|
|
|
|
|
\begin{table}[tbh]
|
|
|
|
|
\centering
|
|
|
|
|
\begin{tabular}{c|c|c|c}
|
|
|
|
|
& $5 \times 4 \times 4$ & $7 \times 4 \times 4$ & $\mathrm{X} \times 4 \times 4$ \\
|
|
|
|
|
\cline{2-4}
|
|
|
|
|
& \textcolor{red}{0.15} (0.147) & \textcolor{red}{0.09} (0.37) & 0.46 (0) \B \\
|
|
|
|
|
\cline{2-4}
|
|
|
|
|
\multicolumn{4}{c}{} \\[-1.4em]
|
|
|
|
|
\hline
|
|
|
|
|
$4 \times 4 \times 4$ & $4 \times 4 \times 5$ & $4 \times 4 \times 7$ & $4 \times 4 \times \mathrm{X}$ \T \\
|
|
|
|
|
\hline
|
|
|
|
|
0.38 (0) & \textcolor{red}{0.17} (0.09) & 0.40 (0) & 0.46 (0) \B \\
|
|
|
|
|
\hline
|
|
|
|
|
\multicolumn{4}{c}{} \\[-1.4em]
|
|
|
|
|
\cline{2-4}
|
|
|
|
|
& $5 \times 5 \times 5$ & $6 \times 6 \times 6$ & $\mathrm{Y} \times \mathrm{Y} \times \mathrm{Y}$ \T \\
|
|
|
|
|
\cline{2-4}
|
|
|
|
|
& \textcolor{red}{-0.18} (0.0775) & \textcolor{red}{-0.13} (0.1715) & -0.25 (0) \B \\
|
|
|
|
|
\cline{2-4}
|
|
|
|
|
\multicolumn{4}{c}{} \\[-1.4em]
|
|
|
|
|
\cline{2-4}
|
|
|
|
|
\multicolumn{3}{c}{} & all: 0.15 (0) \T
|
|
|
|
|
\end{tabular}
|
2017-10-29 20:44:10 +00:00
|
|
|
|
\caption[Correlation between regularity and iterations for 3D]{Correlation
between \emph{regularity} and number of iterations for the 3D fitting scenario.
|
2017-10-25 18:40:51 +00:00
|
|
|
|
Displayed are the negated Spearman coefficients with the corresponding p--values
|
|
|
|
|
in brackets for various given grids ($\mathrm{X} \in [4,5,7], \mathrm{Y} \in [4,5,6]$).
|
|
|
|
|
\newline Note: Not significant results are marked in \textcolor{red}{red}.}
|
2017-10-27 12:31:55 +00:00
|
|
|
|
\label{tab:3dreg}
|
2017-10-25 18:40:51 +00:00
|
|
|
|
\end{table}
|
|
|
|
|
|
2017-10-29 20:44:10 +00:00
|
|
|
|
In contrast to the predictions of \emph{variability} our test on
\emph{regularity} gave mixed results --- similar to the 1D--case.
|
2017-10-25 18:40:51 +00:00
|
|
|
|
|
2017-10-27 12:31:55 +00:00
|
|
|
|
In roughly half of the scenarios we have a \emph{significant}, but
|
2017-10-29 20:44:10 +00:00
|
|
|
|
\emph{weak} to \emph{moderate} correlation between \emph{regularity} and
|
|
|
|
|
number of iterations. On the other hand, in the scenarios where we
increased the number of control--points, namely \(125\) for the
\(5 \times 5 \times 5\) grid and \(216\) for the \(6 \times 6 \times 6\)
grid, we found a \emph{significant}, but \emph{weak}
\textbf{anti}--correlation when taking all three tests into
account\footnote{Displayed as \(Y \times Y \times Y\).}, which seems to
contradict the findings/trends for the sets with \(64\), \(80\), and
\(112\) control--points (first two rows of table \ref{tab:3dreg}).
|
2017-10-25 18:40:51 +00:00
|
|
|
|
|
|
|
|
|
Taking all results together we only find a \emph{very weak}, but
|
2017-10-29 20:44:10 +00:00
|
|
|
|
\emph{significant} link between \emph{regularity} and the number of
|
|
|
|
|
iterations needed for the algorithm to converge.
|
2017-10-25 18:40:51 +00:00
|
|
|
|
|
|
|
|
|
\begin{figure}[!htb]
|
|
|
|
|
\centering
|
|
|
|
|
\includegraphics[width=\textwidth]{img/evolution3d/regularity_montage.png}
|
|
|
|
|
\caption[Regularity for different 3D--grids]{
|
2017-10-29 20:44:10 +00:00
|
|
|
|
Plots of \emph{regularity} against number of iterations for various scenarios together
|
2017-10-27 12:31:55 +00:00
|
|
|
|
with a linear fit to indicate trends.}
|
2017-10-25 18:40:51 +00:00
|
|
|
|
\label{fig:resreg3d}
|
|
|
|
|
\end{figure}
|
|
|
|
|
|
2017-10-27 12:31:55 +00:00
|
|
|
|
As can be seen from figure \ref{fig:resreg3d}, increasing the number of
control--points helps the convergence--speed.
The regularity--criterion first behaves as we would like it to, but then
switches to behave exactly opposite to our expectations, as can be seen
in the first three plots. While the number of control--points increases
from red to green to blue and the number of iterations decreases, the
\emph{regularity} seems to increase at first, but then decreases again
on higher grid--resolutions.
|
2017-10-27 12:31:55 +00:00
|
|
|
|
|
2017-10-29 20:44:10 +00:00
|
|
|
|
This can be an artefact of the definition of \emph{regularity}, as it is
|
2017-10-27 12:31:55 +00:00
|
|
|
|
defined by the inverse condition--number of the deformation--matrix
|
|
|
|
|
\(\vec{U}\), being the fraction
\(\frac{\sigma_{\mathrm{min}}}{\sigma_{\mathrm{max}}}\) between the
smallest and largest right singular value.
|
|
|
|
|
|
|
|
|
|
As we observed in the previous section, we cannot guarantee that each
control--point has an effect (see figure \ref{fig:histrank3d}), and so a
small minimal right singular value occurring on higher grid--resolutions
seems to be the likely cause of the problem.
|
|
|
|
|
|
|
|
|
|
Adding to this we also noted that in the case of the
\(10 \times 10 \times 10\)--grid the \emph{regularity} was always \(0\),
|
|
|
|
|
as a non--contributing control--point yields a \(0\)--column in the
|
2017-10-27 12:31:55 +00:00
|
|
|
|
deformation--matrix, thus letting \(\sigma_\mathrm{min} = 0\). A better
|
2017-10-29 20:44:10 +00:00
|
|
|
|
definition for \emph{regularity} (i.e.~using the smallest non--zero
|
|
|
|
|
right singular value) could solve this particular issue, but not fix the
|
|
|
|
|
trend we noticed above.
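The effect can be illustrated with a small sketch comparing the \emph{regularity} as defined with the alternative floated above that uses the smallest non--zero singular value:

\begin{verbatim}
import numpy as np

def regularity_variants(U, tol=1e-12):
    # a 0-column from a non-contributing control-point forces the
    # smallest singular value (and thus the regularity) to 0
    sigma = np.linalg.svd(U, compute_uv=False)   # descending order
    nonzero = sigma[sigma > tol]
    return sigma[-1] / sigma[0], nonzero[-1] / sigma[0]
\end{verbatim}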
|
2017-10-25 18:40:51 +00:00
|
|
|
|
|
|
|
|
|
\subsection{Improvement Potential}\label{improvement-potential-2}
|
|
|
|
|
|
2017-10-27 12:31:55 +00:00
|
|
|
|
\begin{table}[tbh]
|
|
|
|
|
\centering
|
|
|
|
|
\begin{tabular}{c|c|c|c}
|
|
|
|
|
& $5 \times 4 \times 4$ & $7 \times 4 \times 4$ & $\mathrm{X} \times 4 \times 4$ \\
|
|
|
|
|
\cline{2-4}
|
|
|
|
|
& 0.3 (0.0023) & \textcolor{red}{0.23} (0.0233) & 0.89 (0) \B \\
|
|
|
|
|
\cline{2-4}
|
|
|
|
|
\multicolumn{4}{c}{} \\[-1.4em]
|
|
|
|
|
\hline
|
|
|
|
|
$4 \times 4 \times 4$ & $4 \times 4 \times 5$ & $4 \times 4 \times 7$ & $4 \times 4 \times \mathrm{X}$ \T \\
|
|
|
|
|
\hline
|
|
|
|
|
0.5 (0) & 0.38 (0) & 0.32 (0.0012) & 0.9 (0) \B \\
|
|
|
|
|
\hline
|
|
|
|
|
\multicolumn{4}{c}{} \\[-1.4em]
|
|
|
|
|
\cline{2-4}
|
|
|
|
|
& $5 \times 5 \times 5$ & $6 \times 6 \times 6$ & $\mathrm{Y} \times \mathrm{Y} \times \mathrm{Y}$ \T \\
|
|
|
|
|
\cline{2-4}
|
|
|
|
|
& 0.47 (0) & \textcolor{red}{-0.01} (0.8803) & 0.89 (0) \B \\
|
|
|
|
|
\cline{2-4}
|
|
|
|
|
\multicolumn{4}{c}{} \\[-1.4em]
|
|
|
|
|
\cline{2-4}
|
|
|
|
|
\multicolumn{3}{c}{} & all: 0.95 (0) \T
|
|
|
|
|
\end{tabular}
|
2017-10-29 20:44:10 +00:00
|
|
|
|
\caption[Correlation between improvement potential and fitting--error for 3D]{Correlation
between \emph{improvement potential} and fitting--error for the 3D fitting scenario.
|
2017-10-27 12:31:55 +00:00
|
|
|
|
Displayed are the negated Spearman coefficients with the corresponding p--values
|
|
|
|
|
in brackets for various given grids ($\mathrm{X} \in [4,5,7], \mathrm{Y} \in [4,5,6]$).
|
|
|
|
|
\newline Note: Not significant results are marked in \textcolor{red}{red}.}
|
|
|
|
|
\label{tab:3dimp}
|
|
|
|
|
\end{table}
|
|
|
|
|
|
2017-10-28 18:54:05 +00:00
|
|
|
|
Compared to the 1D--scenario, we do not know the optimal solution to
the given problem and for the calculation we only use the initial
gradient produced by the initial correspondences between both objects. This
gradient changes with every iteration and will quickly deviate from our
first guess. This is the reason we are not trying to create
artificially bad gradients, as we have a broad range in quality of such
gradients anyway.
|
|
|
|
|
|
|
|
|
|
\begin{figure}[htb]
|
2017-10-25 18:40:51 +00:00
|
|
|
|
\centering
|
|
|
|
|
\includegraphics[width=\textwidth]{img/evolution3d/improvement_montage.png}
|
|
|
|
|
\caption[Improvement potential for different 3D--grids]{
|
2017-10-29 20:44:10 +00:00
|
|
|
|
Plots of \emph{improvement potential} against error given by our \emph{fitness--function}
|
2017-10-27 12:31:55 +00:00
|
|
|
|
after convergence together with a linear fit of each of the plotted data to
|
|
|
|
|
indicate trends.}
|
2017-10-25 18:40:51 +00:00
|
|
|
|
\label{fig:resimp3d}
|
2017-10-24 11:22:20 +00:00
|
|
|
|
\end{figure}
|
|
|
|
|
|
2017-10-29 20:44:10 +00:00
|
|
|
|
We plotted our findings on the \emph{improvement potential} in a similar
|
|
|
|
|
way as we did before with the \emph{regularity}. In figure
|
|
|
|
|
\ref{fig:resimp3d} one can clearly see the correlation and the spread
|
|
|
|
|
within each setup and the behaviour when we increase the number of
|
|
|
|
|
control--points.
|
2017-10-28 18:54:05 +00:00
|
|
|
|
|
2017-10-29 18:02:31 +00:00
|
|
|
|
In addition we give the Spearman--coefficients together with their
|
2017-10-28 18:54:05 +00:00
|
|
|
|
p--values in table \ref{tab:3dimp}. Within one scenario we only find a
|
2017-10-29 20:44:10 +00:00
|
|
|
|
\emph{weak} to \emph{moderate} correlation between the \emph{improvement
|
|
|
|
|
potential} and the fitting error, but all findings (except for
|
2017-10-28 18:54:05 +00:00
|
|
|
|
\(7 \times 4 \times 4\) and \(6 \times 6 \times 6\)) are significant.
|
|
|
|
|
|
|
|
|
|
If we take multiple datasets into account the correlation is \emph{very
|
|
|
|
|
strong} and \emph{significant}, which is good, as this functions as a
|
|
|
|
|
litmus--test, because the quality is naturally tied to the number of
|
|
|
|
|
control--points.
|
|
|
|
|
|
2017-10-29 20:44:10 +00:00
|
|
|
|
All in all the \emph{improvement potential} seems to be a good and
|
|
|
|
|
sensible measure of quality, even given gradients of varying quality.
|
2017-10-28 18:54:05 +00:00
|
|
|
|
|
2017-10-29 20:44:10 +00:00
|
|
|
|
Lastly, a small note on the behaviour of \emph{improvement potential}
and convergence speed, as we used this in the 1D case to argue why the
\emph{regularity} defied our expectations. As a contrast we wanted to
show that \emph{improvement potential} cannot serve as a good
predictor of the convergence speed. In figure \ref{fig:imp1d3d} we
|
|
|
|
|
show \emph{improvement potential} against number of iterations for both
|
|
|
|
|
scenarios. As one can see, in the 1D scenario we have a \emph{strong}
|
|
|
|
|
and \emph{significant} correlation (with \(-r_S = -0.72\), \(p = 0\)),
|
|
|
|
|
whereas in the 3D scenario we have the opposite \emph{significant} and
|
|
|
|
|
\emph{strong} effect (with \(-r_S = 0.69\), \(p=0\)), so these
|
|
|
|
|
correlations clearly seem to be dependent on the scenario and are not
|
|
|
|
|
suited for generalization.
|
2017-10-29 18:02:31 +00:00
|
|
|
|
|
|
|
|
|
\begin{figure}[hbt]
|
|
|
|
|
\centering
|
|
|
|
|
\includegraphics[width=\textwidth]{img/imp1d3d.png}
|
2017-10-29 20:44:10 +00:00
|
|
|
|
\caption[Improvement potential and convergence speed\newline for 1D and 3D--scenarios]{
|
2017-10-29 18:02:31 +00:00
|
|
|
|
\newline
|
2017-10-29 20:44:10 +00:00
|
|
|
|
Left: \emph{Improvement potential} against convergence speed for the
|
2017-10-29 18:02:31 +00:00
|
|
|
|
1D--scenario\newline
|
2017-10-29 20:44:10 +00:00
|
|
|
|
Right: \emph{Improvement potential} against convergence speed for the 3D--scenario
|
2017-10-29 18:02:31 +00:00
|
|
|
|
}
|
|
|
|
|
\label{fig:imp1d3d}
|
|
|
|
|
\end{figure}
\chapter{Discussion and outlook}\label{discussion-and-outlook}
|
2017-08-23 19:18:44 +00:00
|
|
|
|
|
2017-10-08 19:08:29 +00:00
|
|
|
|
\label{sec:dis}
|
|
|
|
|
|
2017-10-29 20:44:10 +00:00
|
|
|
|
In this thesis we took a look at the different criteria for
|
|
|
|
|
\emph{evolvability} as introduced by Richter et al.\cite{anrichterEvol},
|
|
|
|
|
namely \emph{variability}, \emph{regularity} and \emph{improvement
|
|
|
|
|
potential}, under different setup--conditions. Whereas Richter et al. used
\acf{RBF}, we employed \acf{FFD} to set up a low--complexity
|
|
|
|
|
parametrization of a more complex vertex--mesh.
|
2017-10-28 18:54:05 +00:00
|
|
|
|
|
|
|
|
|
In our findings we could show in the 1D--scenario that there were
statistically \emph{significant} \emph{very strong} correlations between
\emph{variability and fitting error} (\(0.94\)) and \emph{improvement
potential and fitting error} (\(1.0\)) with results comparable to those of
Richter et al. (with \(0.31\) to \(0.88\) for the former and \(0.75\) to
\(0.99\) for the latter), whereas we found only \emph{weak} correlations
for \emph{regularity and convergence--speed} (\(0.28\)) as opposed to
Richter et al. with \(0.39\) to \(0.91\).\footnote{We only took
|
|
|
|
|
statistically \emph{significant} results into consideration when
|
|
|
|
|
compiling these numbers. Details are given in the respective chapters.}
|
|
|
|
|
|
|
|
|
|
For the 3D--scenario our results show a \emph{very strong},
|
|
|
|
|
\emph{significant} correlation between \emph{variability and fitting
|
|
|
|
|
error} with \(0.89\) to \(0.94\), which is well in line with the
findings of Richter et al. (\(0.65\) to \(0.95\)). The correlation
between \emph{improvement potential and fitting error} behaves similarly,
with our findings having a significant coefficient of \(0.3\) to
\(0.95\) depending on the grid--resolution, compared to the \(0.61\) to
\(0.93\) from Richter et al. In the case of the correlation of
\emph{regularity and convergence speed} we found very different (and
often not significant) correlations and anti--correlations ranging from
\(-0.25\) to \(0.46\), whereas Richter et al. reported correlations
between \(0.34\) and \(0.87\).
|
2017-10-28 18:54:05 +00:00
|
|
|
|
|
|
|
|
|
Taking these results into consideration, one can say that
\emph{variability} and \emph{improvement potential} are very good
estimates for the quality of a fit using \acf{FFD} as a deformation
function, while we could not reproduce similarly compelling results as
Richter et al. for \emph{regularity and convergence speed}.
|
2017-10-28 18:54:05 +00:00
|
|
|
|
|
|
|
|
|
One reason for the bad or erratic behaviour of the
|
|
|
|
|
\emph{regularity}--criterion could be that in an \ac{FFD}--setting we
|
|
|
|
|
have a likelihood of having control--points that are only contributing
|
2017-10-29 18:02:31 +00:00
|
|
|
|
to the whole parametrization in negligible amounts, resulting in very
|
2017-10-28 18:54:05 +00:00
|
|
|
|
small right singular values of the deformation--matrix \(\vec{U}\) that
|
|
|
|
|
influence the condition--number and thus the \emph{regularity} in a
|
|
|
|
|
significant way. Further research is needed to refine \emph{regularity}
so that these problems get addressed, for example by taking all singular
values into account when capturing the notion of \emph{regularity}.
|
2017-10-28 18:54:05 +00:00
|
|
|
|
|
|
|
|
|
Richter et al. also compared the behaviour of direct and indirect
|
|
|
|
|
manipulation in \cite{anrichterEvol}, whereas we merely used an indirect
|
|
|
|
|
\ac{FFD}--approach. As direct manipulations tend to perform better than
|
|
|
|
|
indirect manipulations, the usage of \acf{DM--FFD} could also work
|
2017-10-29 18:02:31 +00:00
|
|
|
|
better with the criteria we examined. This can also solve the problem of
|
|
|
|
|
bad singular values for the \emph{regularity} as the incorporation of
|
2017-10-29 20:44:10 +00:00
|
|
|
|
the parametrization of the points on the surface --- which are the
|
|
|
|
|
essential part of a direct--manipulation --- could cancel out a bad
|
2017-10-29 18:02:31 +00:00
|
|
|
|
control--grid as the bad control--points are never or negligibly used to
|
|
|
|
|
parametrize those surface--points.
|
2017-08-23 19:18:44 +00:00
|
|
|
|
|
2017-10-08 21:29:45 +00:00
|
|
|
|
% \backmatter
|
2017-08-28 16:39:18 +00:00
|
|
|
|
\cleardoublepage
|
|
|
|
|
|
2017-10-08 21:29:45 +00:00
|
|
|
|
\renewcommand\thechapter{\Alph{chapter}}
|
2017-08-28 16:39:18 +00:00
|
|
|
|
\chapter*{Appendix}
|
|
|
|
|
\addcontentsline{toc}{chapter}{\protect\numberline{}Appendix}
|
2017-10-08 21:29:45 +00:00
|
|
|
|
\addtocontents{toc}{\protect\setcounter{tocdepth}{1}}
|
|
|
|
|
\setcounter{chapter}{0} % reset section to 1 so its stars I, II, III,...
|
2017-08-28 16:39:18 +00:00
|
|
|
|
\pagenumbering{roman}
|
|
|
|
|
%%%%%%%%%%%%%%% Literaturverzeichnis %%%%%%%%%%%%%%%
|
2017-10-07 23:38:30 +00:00
|
|
|
|
\bibliographystyle{unsrtdin} % \bibliographystyle{natdin}
|
2017-08-28 16:39:18 +00:00
|
|
|
|
\bibliography{bibma}
|
2017-10-08 21:29:45 +00:00
|
|
|
|
% \addcontentsline{toc}{chapter}{\protect\numberline{\thechapter}Bibliography} % Literaturverzeichnis in das Inhaltsverzeichnis aufnehmen
|
|
|
|
|
% \addtocounter{chapter}{1}
|
2017-08-28 16:39:18 +00:00
|
|
|
|
\newpage
|
2017-08-23 19:18:44 +00:00
|
|
|
|
|
|
|
|
|
%%%%%%%%%%%%%%% Anhang %%%%%%%%%%%%%%%
|
2017-08-28 16:39:18 +00:00
|
|
|
|
% \clearpage %spaeter alles wieder rein
|
2017-08-23 19:18:44 +00:00
|
|
|
|
% % \input{files/appendix}
|
2017-08-28 16:39:18 +00:00
|
|
|
|
\input{settings/abkuerzungen}
|
2017-10-08 21:29:45 +00:00
|
|
|
|
% \addcontentsline{toc}{chapter}{\protect\numberline{\thechapter}Abbreviations}
|
|
|
|
|
% \addtocounter{chapter}{1}
|
2017-08-28 16:39:18 +00:00
|
|
|
|
\newpage
|
|
|
|
|
|
2017-10-02 20:13:24 +00:00
|
|
|
|
% \listofalgorithms
|
|
|
|
|
% \addcontentsline{toc}{section}{\protect\numberline{\thesection}List of Algorithms}
|
|
|
|
|
% \addtocounter{section}{1}
|
|
|
|
|
% \newpage
|
|
|
|
|
%
|
2017-08-28 16:39:18 +00:00
|
|
|
|
\listoffigures
|
2017-10-08 21:29:45 +00:00
|
|
|
|
% \addcontentsline{toc}{chapter}{\protect\numberline{\thechapter}List of Figures}
|
|
|
|
|
% \addtocounter{chapter}{1}
|
|
|
|
|
\newpage
|
2017-10-02 20:13:24 +00:00
|
|
|
|
% \listoftables
|
2017-10-29 20:44:10 +00:00
|
|
|
|
% \listoftodos
|
2017-10-08 21:29:45 +00:00
|
|
|
|
% \addcontentsline{toc}{chapter}{\protect\numberline{\thechapter}TODOs}
|
|
|
|
|
% \addtocounter{chapter}{1}
|
2017-10-29 20:44:10 +00:00
|
|
|
|
% \newpage
|
2017-08-23 19:18:44 +00:00
|
|
|
|
% \printindex
|
|
|
|
|
|
|
|
|
|
%%%%%%%%%%%%%%% Erklaerung %%%%%%%%%%%%%%%
|
|
|
|
|
% *\input{settings/declaration}
|
|
|
|
|
\include{files/erklaerung}
|
|
|
|
|
\end{document}
|