% bibtotoc[numbered] : bibliography is included in the table of contents
% abstracton : abstract with heading
\documentclass[
a4paper, % default
12pt, % default = 11pt
BCOR6mm, % binding correction of 6mm for glue binding, BCOR8.25mm for punched holes
twoside, % default, two-sided
titlepage,
% pagesize=auto
% openany, % chapters may also start on even pages
% draft % compiles faster, dummy images
% appendixprefix, % appendix with prefix label
bibtotocnumbered,
liststotocnumbered,
listof=totocnumbered,
index=totocnumbered,
xcolor=dvipsnames,
]{scrbook}
%%%%%%%%%%%%%%% Bibliography style %%%%%%%%%%%%%%%
% attention: also adjust \bibstyle below!
% \usepackage[square]{natbib} % for bibstyle natdin/ see ../natbib.pdf
%%%%%%%%%%%%%%% Packages %%%%%%%%%%%%%%%
\input{settings/packages}
\makeindex
%%%%%%%%%%%%%%% Graphics %%%%%%%%%%%%%%%
\graphicspath{{pics/}}
%%%%%%%%%%%%%%% Global settings %%%%%%%%%%%%%%%
\input{settings/commands}
\input{settings/environments}
%\setlength{\parindent}{0pt} % no indentation at the start of paragraphs
%\setlength{\lineskip}{1ex plus0.5ex minus0.5ex} % use spacing between paragraphs instead (not working yet)
% \renewcommand{\familydefault}{\sfdefault}
\setstretch{1.44} % 1.5 times line spacing
%%%%%%%%%%%%%%% Header - Footer %%%%%%%%%%%%%%%
% ### for the two-sided layout (option twoside):
\usepackage{fancyhdr}%http://www.tug.org/tex-archive/info/german/fancyhdr
\pagestyle{fancy} % must be called before the following renewcommands !!!
\fancyhead{} % clear old definition
\fancyfoot{} % ditto
\renewcommand{\chaptermark}[1]{\markboth{\chaptername\ \thechapter{}: #1}{}}
\renewcommand{\sectionmark}[1]{\markright{\thesection{}~~#1}}
% % to prevent the hard-coded \MakeUppercase
\fancyhead[EL]{\textrm{\nouppercase\leftmark}}% even = left-hand pages, placed left, i.e. on the outside: \leftmark
\fancyhead[OR]{\textrm{\nouppercase\rightmark}}% odd = right-hand pages, placed right, i.e. on the outside: \rightmark
\fancyfoot[RO,LE]{\thepage} % page number: right on odd, left on even pages
% ###### Title ######
\usepackage[explicit]{titlesec}
\newcommand{\hsp}{\hspace{20pt}}
% \titleformat{\chapter}[hang]{\Huge\bfseries\ }{\textcolor{CadetBlue}{\thechapter} #1}{20pt}{\Huge\bfseries\ }
\titleformat{name=\chapter,numberless}[hang]{\Huge\bfseries\ }{#1}{20pt}{\Huge\bfseries\ }
\titleformat{\chapter}[hang]{\Huge\bfseries\ }{\color{CadetBlue}\thechapter}{20pt}{\begin{tabular}[t]{@{\color{CadetBlue}\vrule width 2pt}>{\hangindent=20pt\hsp}p{\dimexpr 1\textwidth -44pt}}#1\end{tabular}}
\titleformat{name=\section,numberless}[hang]{\Large\bfseries\ }{#1}{32pt}{\Large\bfseries\ }
\titleformat{\section}[hang]{\Large\bfseries\ }{\color{CadetBlue}\thesection}{32pt}{\begin{tabular}[t]{p{\dimexpr 1\textwidth -44pt}}#1\end{tabular}}
\titleformat{name=\subsection,numberless}[hang]{\large\bfseries\ }{#1}{27pt}{\large\bfseries\ }
\titleformat{\subsection}[hang]{\large\bfseries\ }{\color{CadetBlue}\thesubsection}{27pt}{\begin{tabular}[t]{p{\dimexpr 1\textwidth -44pt}}#1\end{tabular}}
% ### for the one-sided layout
%\usepackage{fancyhdr} %
%\lhead{\textsf{\nouppercase\leftmark}}
%\chead{}
%\rhead{\textsf{\nouppercase\rightmark}}
%\lfoot{}
%\cfoot{\textsf{\thepage}}
%\rfoot{}
\setkomafont{sectioning}{\rmfamily\bfseries}
\setcounter{tocdepth}{3}
%\setcounter{secnumdepth}{3}
% \input{settings/hyphenation} %% sometimes latex does not break lines correctly; add hyphenation rules here.
% \includeonly{%
% % files/0_titlepage.tex
% % files/1_0_introduction,%
% % files/2_0_knownDCJ,%
% % files/3_0_DCJIndels,%
% % files/4_0_DCJIndels_1comps,%
% files/5_0_DCJIndels_2comps,%
% % files/6_0_implementation,%
% % files/7_0_evaluation%
% % ,files/8_0_conclusion%
% }
%%%%%%%%%%%%%%% PANDOC-needed defs %%%%%%%%%%
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
%disable "Redefining ngerman shorthand"-Message
% \makeatletter
% \patchcmd{\pdfstringdef}
% {\csname HyPsd@babel@}
% {\let\bbl@info\@gobble\csname HyPsd@babel@}
% {}{}
% \makeatother
%%%%%%%%%%%%%%% Main document %%%%%%%%%%%%%%%
\begin{document}
% ###### Autoref definitions (hyperref package)#####
\def\subtableautorefname{Table}
\def\algorithmautorefname{Algorithm}
\def\chapterautorefname{Chapter}
\def\sectionautorefname{Section}
\def\definitionautorefname{Definition}
\def\exampleautorefname{Example}
\def\observationautorefname{Observation}
\def\propositionautorefname{Proposition}
\def\lemmaautorefname{Lemma}
% not used in this document:
% \def\subsectionautorefname{Subsection}
% \def\Subsubsectionautorefname{Subsubsection}
% \def\subfigureautorefname{Figure}
% \def\claimautorefname{Claim}
%%%%%%%%%%%%%%% Title page %%%%%%%%%%%%%%%
\extratitle{}
\input{files/titlepage}
%\input{files/titlepage.pdf} % back side blank
% \input{files/0_deckblatt/title}
\pagestyle{empty} % back side blank
%
%%%%%%%%%%%%%%% Table of contents and lists %%%%%%%%%%%%%%%
\frontmatter % abstract structuring level: beginning of the book
\renewcommand{\autodot}{}
\tableofcontents % back side blank
%\lstlistoflistings % for a list of listings with the listings package
%%%%%%%%%%%%%%% Main part %%%%%%%%%%%%%%%
% about 60-100 pages in total, of which at least 50% own work
\mainmatter % abstract structuring level: main part of the book
\pagestyle{fancy}
\pagenumbering{arabic}
\chapter*{How to read this Thesis}
As a guide through the nomenclature used in the formulas we prepend this
chapter. Unless otherwise noted the following holds:
\begin{itemize}
\tightlist
\item
lowercase letters \(x,y,z\)\\
refer to real variables and represent a point in 3D--space.
\item
lowercase letters \(u,v,w\)\\
refer to real variables between \(0\) and \(1\) used as coefficients
in a 3D B--Spline grid.
\item
other lowercase letters\\
refer to other scalar (real) variables.
\item
lowercase \textbf{bold} letters (e.g. \(\vec{x},\vec{y}\))\\
refer to 3D coordinates.
\item
uppercase \textbf{BOLD} letters (e.g. \(\vec{D}, \vec{M}\))\\
refer to matrices.
\end{itemize}
\chapter{Introduction}\label{introduction}
\improvement[inline]{More pictures}
Many modern industrial design processes require advanced optimization
methods due to their increased complexity. These designs have to adhere
to more and more degrees of freedom as methods are refined and/or other
methods are used. Examples for this are physical domains like
aerodynamics (e.g.~drag), fluid dynamics (e.g.~throughput of liquid)
--- where the complexity increases with the temporal and spatial
resolution of the simulation --- or known hard algorithmic problems in
computer science (e.g.~layout of circuit boards or stacking of
3D--objects). Moreover, these are typically not static environments;
requirements shift over time or from case to case.

Evolutionary algorithms cope especially well with these problem domains
while addressing all the issues at hand\cite{minai2006complex}. One of
the main concerns in these algorithms is the formulation of the problem
in terms of a genome and a fitness function. While one can typically use
an arbitrary cost--function as the fitness--function (e.g.~amount of
drag, amount of space, etc.), the translation of the problem--domain
into a simple parametric representation can be challenging.

The quality of such a representation in biological evolution is called
\emph{evolvability}\cite{wagner1996complex} and is at the core of this
thesis. However, there is no consensus on how \emph{evolvability} is
defined and the meaning varies from context to
context\cite{richter2015evolvability}.

As we transfer the results of Richter et al.\cite{anrichterEvol} from
using \acf{RBF} as a representation to manipulate a geometric mesh to
the use of \acf{FFD}, we will use the same definition of evolvability as
the original authors, namely \emph{regularity}, \emph{variability}, and
\emph{improvement potential}. We introduce these terms in detail in
Chapter \ref{sec:intro:rvi}. In the original publication the authors
used randomly sampled points weighted with \acf{RBF} to deform the mesh
and showed that the mentioned criteria of \emph{regularity},
\emph{variability}, and \emph{improvement potential} correlate with the
quality and potential of such an optimization.
We will replicate the same setup on the same meshes but use \acf{FFD}
instead of \acf{RBF} to create a local deformation near the control
points and evaluate whether the evolvability--criteria still work as
predictors given the different deformation scheme, as suspected in
\cite{anrichterEvol}.
\section{Outline of this thesis}\label{outline-of-this-thesis}
First we introduce different topics in isolation in Chapter
\ref{sec:back}. We take an abstract look at the definition of \ac{FFD}
for a one--dimensional line (in \ref{sec:back:ffd}) and discuss why this
is a sensible deformation function (in \ref{sec:back:ffdgood}). Then we
establish some background--knowledge of evolutionary algorithms (in
\ref{sec:back:evo}) and why they are useful in our domain (in
\ref{sec:back:evogood}). In a third step we take a look at the
definition of the different evolvability criteria established in
\cite{anrichterEvol}.

In Chapter \ref{sec:impl} we take a look at our implementation of
\ac{FFD} and its adaptation for 3D--meshes. Next, in Chapter
\ref{sec:eval}, we describe the different scenarios we use to evaluate
the different evolvability--criteria, incorporating all aspects
introduced in Chapter \ref{sec:back}. Following that, we evaluate the
results in Chapter \ref{sec:res} and discuss them further in Chapter
\ref{sec:dis}.
\chapter{Background}\label{background}
\label{sec:back}
\section{\texorpdfstring{What is \acf{FFD}?}{What is FFD?}}\label{what-is}
\label{sec:back:ffd}

First of all we have to establish how an \ac{FFD} works and why it is a
good tool for deforming meshes in the first place. For simplicity we
only summarize the 1D--case from \cite{spitzmuller1996bezier} here and
go into the extension to the 3D case in Chapter \ref{3dffd}.

Given an arbitrary number of points \(p_i\) along a line, we map a
scalar value \(\tau_i \in [0,1[\) to each point with
\(\tau_i < \tau_{i+1} \forall i\). Given a degree of the target
polynomial \(d\) we define the basis function \(N_{i,d,\tau_i}(u)\) as
follows:
\begin{equation} \label{eqn:ffd1d1}
N_{i,0,\tau}(u) = \begin{cases} 1, & u \in [\tau_i, \tau_{i+1}[ \\ 0, & \mbox{otherwise} \end{cases}
\end{equation}
and
\begin{equation} \label{eqn:ffd1d2}
N_{i,d,\tau}(u) = \frac{u-\tau_i}{\tau_{i+d}-\tau_i} N_{i,d-1,\tau}(u) + \frac{\tau_{i+d+1} - u}{\tau_{i+d+1}-\tau_{i+1}} N_{i+1,d-1,\tau}(u)
\end{equation}
If we now multiply every \(p_i\) with the corresponding
\(N_{i,d,\tau_i}(u)\) we get the contribution of each point \(p_i\) to
the final curve--point parameterized only by \(u \in [0,1[\). As can be
seen from \eqref{eqn:ffd1d2} we only access points \([i..i+d]\) for any
given \(i\)\footnote{one more for each recursive step.}, which gives us,
in combination with choosing \(p_i\) and \(\tau_i\) in order, only a
local influence of \(d+1\) points.

We can even derive this equation straightforwardly for an arbitrary
\(N\)\footnote{\emph{Warning:} in the case of \(d=1\) the
recursion--formula yields a \(0\) denominator, but \(N\) is also
\(0\). The right solution for this case is a derivative of \(0\).}:
\[\frac{\partial}{\partial u} N_{i,d,\tau}(u) = \frac{d}{\tau_{i+d} - \tau_i} N_{i,d-1,\tau}(u) - \frac{d}{\tau_{i+d+1} - \tau_{i+1}} N_{i+1,d-1,\tau}(u)\]

For a B--Spline \[s(u) = \sum_{i} N_{i,d,\tau_i}(u) p_i\] these
derivatives yield \(\frac{\partial^d}{\partial u^d} s(u) = 0\).

Another interesting property of these recursive polynomials is that they
are continuous (given \(d \ge 1\)) as every \(p_i\) gets blended in
linearly between \(\tau_i\) and \(\tau_{i+d}\) and out linearly between
\(\tau_{i+1}\) and \(\tau_{i+d+1}\), as can be seen from the two
coefficients in every step of the recursion.
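
To make the recursion tangible, the following short Python sketch
evaluates the basis functions of \eqref{eqn:ffd1d1} and
\eqref{eqn:ffd1d2} and a resulting 1D spline point. It is only an
illustration (the knot vector \texttt{tau} and all names are assumptions
made for this example) and not the implementation discussed in Chapter
\ref{sec:impl}.
\begin{verbatim}
# Illustrative sketch of the Cox-de Boor recursion; tau needs
# len(points) + d + 1 entries for this toy example.
def N(i, d, u, tau):
    """i-th basis function of degree d over the knot vector tau."""
    if d == 0:
        return 1.0 if tau[i] <= u < tau[i + 1] else 0.0
    left = right = 0.0
    if tau[i + d] != tau[i]:
        left = (u - tau[i]) / (tau[i + d] - tau[i]) * N(i, d - 1, u, tau)
    if tau[i + d + 1] != tau[i + 1]:
        right = ((tau[i + d + 1] - u) / (tau[i + d + 1] - tau[i + 1])
                 * N(i + 1, d - 1, u, tau))
    return left + right

def spline_point(u, points, d, tau):
    """s(u) = sum_i N_{i,d,tau}(u) * p_i for scalar control points p_i."""
    return sum(N(i, d, u, tau) * p for i, p in enumerate(points))
\end{verbatim}
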
\subsection{\texorpdfstring{Why is \ac{FFD} a good deformation
function?}{Why is FFD a good deformation function?}}\label{why-is-a-good-deformation-function}

\label{sec:back:ffdgood}
The usage of \ac{FFD} as a tool for manipulating meshes follows directly
from the properties of the polynomials and the correspondence to the
control points. Having only a few control points gives the user a nicer
high--level--interface, as she only needs to move these points and the
model follows in an intuitive manner. The deformation is smooth as the
underlying polynomial is smooth as well and affects as many vertices of
the model as needed. Moreover the changes are always local, so there is
no risk of a change that a user cannot immediately see.

But there are also disadvantages of this approach. The user loses the
ability to directly influence vertices and even seemingly simple tasks
such as creating a plateau can be difficult to
achieve\cite[chapter~3.2]{hsu1991dmffd}\cite{hsu1992direct}.
These disadvantages led to the formulation of
\acf{DM--FFD}\cite[chapter~3.3]{hsu1991dmffd} in which the user directly
interacts with the surface--mesh. All interactions are applied
proportionally to the control--points that make up the parametrization
of the interaction--point itself, yielding a smooth deformation of the
surface \emph{at} the surface without seemingly arbitrarily scattered
control--points. Moreover this increases the efficiency of an
evolutionary optimization\cite{Menzel2006}, which we will use later on.
\begin{figure}[!ht]
\includegraphics[width=\textwidth]{img/hsu_fig7.png}
\caption{Figure 7 from \cite{hsu1991dmffd}.}
\label{fig:hsu_fig7}
\end{figure}

But this approach also has downsides, as can be seen in Figure
\ref{fig:hsu_fig7}: the tessellation of the invisible grid has a major
impact on the deformation itself.
All in all \ac{FFD} and \ac{DM--FFD} are still good ways to deform a
high--polygon mesh despite the downsides.
\section{What is evolutionary
optimization?}\label{what-is-evolutional-optimization}

\label{sec:back:evo}
\change[inline]{Write this section} In this thesis we are using an
evolutionary optimization strategy to solve the problem of finding the
best parameters for our deformation. This approach, however, is very
generic and we introduce it here in a broader sense.
\begin{algorithm}
\caption{An outline of evolutionary algorithms}
\label{alg:evo}
\begin{algorithmic}
\STATE t := 0;
\STATE initialize $P(0) := \{\vec{a}_1(0),\dots,\vec{a}_\mu(0)\} \in I^\mu$;
\STATE evaluate $F(0) : \{\Phi(x) | x \in P(0)\}$;
\WHILE{$c(F(t)) \neq$ \TRUE}
\STATE recombine: $P'(t) := r(P(t))$;
\STATE mutate: $P''(t) := m(P'(t))$;
\STATE evaluate $F''(t) : \{\Phi(x) | x \in P''(t)\}$
\STATE select: $P(t + 1) := s(P''(t) \cup Q,\Phi)$;
\STATE t := t + 1;
\ENDWHILE
\end{algorithmic}
\end{algorithm}
The general shape of an evolutionary algorithm (adapted from
\cite{back1993overview}) is outlined in Algorithm \ref{alg:evo}. Here,
\(P(t)\) denotes the population of parameters in step \(t\) of the
algorithm. The population contains \(\mu\) individuals \(a_i\) that fit
the shape of the parameters we are looking for. Typically these are
initialized by a random guess or just zero. Further on we need a
so-called \emph{fitness-function} \(\Phi : I \mapsto M\) that maps each
parameter to a measurable space, along with a convergence-function
\(c : I \mapsto \mathbb{B}\) that terminates the optimization.

The main algorithm just repeats the following steps:
\begin{itemize}
\tightlist
\item
\textbf{Recombine} with a recombination-function
\(r : I^{\mu} \mapsto I^{\lambda}\) to generate new individuals based
on the parents' characteristics.\\
This makes sure that the next guess is close to the old guess.
\item
\textbf{Mutate} with a mutation-function
\(m : I^{\lambda} \mapsto I^{\lambda}\) to introduce new effects that
cannot be produced by mere recombination of the parents.\\
Typically this just adds minor defects to individual members of the
population, like adding random Gaussian noise or amplifying/dampening
random parts.
\item
\textbf{Selection} takes a selection-function
\(s : (I^\lambda \cup I^{\mu + \lambda},\Phi) \mapsto I^\mu\) that
selects from the previously generated \(I^\lambda\) children and
optionally also the parents (denoted by the set \(Q\) in the
algorithm) using the fitness-function \(\Phi\). The result of this
operation is the next population of \(\mu\) individuals.
\end{itemize}
All these functions can (and mostly do) have a lot of hidden parameters
that can be changed over time. One can for example start off with a high
mutation--rate that cools off over time (i.e.~by lowering the variance
of a Gaussian noise). As the recombination and selection-steps are
usually pure, such hidden parameters are mostly part of the
mutation-step.
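
As a toy illustration of Algorithm \ref{alg:evo}, the following Python
sketch runs a \((\mu + \lambda)\)--style loop on an assumed fitness
function; all concrete operators and parameters are placeholders and not
the setup used later in this thesis.
\begin{verbatim}
# Minimal (mu + lambda) evolutionary loop; every operator choice here is
# an illustrative assumption.
import random

def evolve(phi, mu=10, lam=20, dims=5, generations=100):
    P = [[random.gauss(0, 1) for _ in range(dims)] for _ in range(mu)]
    for _ in range(generations):              # c(F(t)): fixed budget here
        children = []
        for _ in range(lam):
            a, b = random.sample(P, 2)        # recombine: blend two parents
            child = [(x + y) / 2 + random.gauss(0, 0.1)   # ...and mutate
                     for x, y in zip(a, b)]
            children.append(child)
        P = sorted(P + children, key=phi)[:mu]  # select, minimizing phi
    return min(P, key=phi)

best = evolve(lambda x: sum(v * v for v in x))  # e.g. minimize a sphere function
\end{verbatim}
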
\section{Advantages of evolutionary
algorithms}\label{advantages-of-evolutional-algorithms}

\label{sec:back:evogood}

\change[inline]{Needs citations} The main advantage of evolutionary
algorithms is the ability to find optima of general functions just with
the help of a given error--function (or fitness--function in this
domain). This avoids the general pitfalls of gradient--based procedures,
which often target the same error--function as an evolutionary
algorithm, but can get stuck in local optima.

This is mostly due to the fact that a gradient--based procedure has only
one point of observation from where it evaluates the next steps, whereas
an evolutionary strategy starts with a population of guessed solutions.
Because an evolutionary strategy modifies the solution randomly, keeps
the best solutions and purges the worst, it can also target multiple
different hypotheses at the same time, where the local optima die out in
the face of other, better candidates.

If an analytic best solution exists (i.e.~because the error--function is
convex) an evolutionary algorithm is not the right choice. Although both
converge to the same solution, the analytic one is usually faster. But
in reality many problems have no analytic solution, because the problem
is not convex. Here evolutionary optimization has one more advantage, as
one gets bad solutions fast, which refine over time.
\section{Criteria for the evolvability of linear
deformations}\label{criteria-for-the-evolvability-of-linear-deformations}

\label{sec:intro:rvi}

\subsection{Variability}\label{variability}
In \cite{anrichterEvol} \emph{variability} is defined as
\[V(\vec{U}) := \frac{\textrm{rank}(\vec{U})}{n},\] whereby \(\vec{U}\)
is the \(n \times m\) deformation--matrix used to map the \(m\) control
points onto the \(n\) vertices.

Given \(n = m\), an identical number of control--points and vertices,
this quotient will be \(=1\) if all control points are independent of
each other and the solution is to trivially move every control--point
onto a target--point.

In practice the value of \(V(\vec{U})\) is typically \(\ll 1\), as there
are only few control--points for many vertices, so \(m \ll n\).

Additionally, in our setup we connect neighbouring control--points in a
grid, so each control point is not independent, but typically depends on
\(4^d\) control--points for a \(d\)--dimensional control mesh.
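
As a quick illustration, \emph{variability} can be computed directly
from the deformation--matrix; the following sketch assumes \(\vec{U}\)
is available as a numpy array \texttt{U} of shape \((n, m)\).
\begin{verbatim}
import numpy as np

def variability(U):
    """V(U) = rank(U) / n for an (n x m) deformation matrix."""
    n = U.shape[0]
    return np.linalg.matrix_rank(U) / n
\end{verbatim}
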
\subsection{Regularity}\label{regularity}

\emph{Regularity} is defined\cite{anrichterEvol} as
\[R(\vec{U}) := \frac{1}{\kappa(\vec{U})} = \frac{\sigma_{min}}{\sigma_{max}}\]
where \(\sigma_{min}\) and \(\sigma_{max}\) are the smallest and
greatest singular values of the deformation--matrix \(\vec{U}\).

As we deform the given object only based on the parameters as
\(\vec{p} \mapsto f(\vec{x} + \vec{U}\vec{p})\), this makes sure that
\(\|\vec{Up}\| \propto \|\vec{p}\|\) when \(\kappa(\vec{U}) \approx 1\).
The inversion of \(\kappa(\vec{U})\) is only performed to map the
criterion--range to \([0..1]\), whereby \(1\) is the optimal value and
\(0\) is the worst value.

This criterion should be characteristic for numeric stability on the one
hand\cite[chapter 2.7]{golub2012matrix} and for the convergence speed of
evolutionary algorithms on the other hand\cite{anrichterEvol}, as it is
tied to the notion of
locality\cite{weise2012evolutionary,thorhauer2014locality}.
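
In the same spirit, \emph{regularity} is just the inverse condition
number of \(\vec{U}\); a small sketch (again assuming a numpy array
\texttt{U}) could look like this:
\begin{verbatim}
import numpy as np

def regularity(U):
    """R(U) = 1 / kappa(U) = sigma_min / sigma_max."""
    s = np.linalg.svd(U, compute_uv=False)   # singular values of U
    return s.min() / s.max()
\end{verbatim}
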
\subsection{Improvement Potential}\label{improvement-potential}
In contrast to the general nature of \emph{variability} and
\emph{regularity}, which are agnostic of the fitness--function at hand,
the third criterion should reflect a notion of potential.

As during optimization some kind of gradient \(g\) is available to
suggest a direction worth pursuing, we use this to guess how much change
can be achieved in the given direction.

The definition of the \emph{improvement potential} \(P\)
is\cite{anrichterEvol}: \[
P(\vec{U}) := 1 - \|(\vec{1} - \vec{U}\vec{U}^+)\vec{G}\|^2_F
\] given some approximate \(n \times d\) fitness--gradient \(\vec{G}\),
normalized to \(\|\vec{G}\|_F = 1\), whereby \(\|\cdot\|_F\) denotes the
Frobenius--Norm.
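
A corresponding sketch for the \emph{improvement potential}, assuming a
numpy array \texttt{U} of shape \((n, m)\) and a gradient \texttt{G} of
shape \((n, d)\), could be:
\begin{verbatim}
import numpy as np

def improvement_potential(U, G):
    """P(U) = 1 - ||(1 - U U^+) G||_F^2 with ||G||_F = 1."""
    G = G / np.linalg.norm(G)                 # normalize to unit Frobenius norm
    n = U.shape[0]
    residual = (np.eye(n) - U @ np.linalg.pinv(U)) @ G
    return 1 - np.linalg.norm(residual, 'fro') ** 2
\end{verbatim}
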
\chapter{\texorpdfstring{Implementation of
\acf{FFD}}{Implementation of FFD}}\label{implementation-of}
\label{sec:impl}
The general formulation of B--Splines has two free parameters \(d\) and
\(\tau\) which must be chosen beforehand.

As we usually work with regular grids in our \ac{FFD} we define \(\tau\)
statically as \(\tau_i = \nicefrac{i}{n}\), whereby \(n\) is the number
of control--points in that direction.

\(d\) defines the \emph{degree} of the B--Spline--Function (a B--Spline
of degree \(d\) is \(d-1\) times continuously differentiable at the
knots). For our purposes we fix \(d\) to \(3\), but give the formulas
for the general case so they can be adapted quite freely.
\section{\texorpdfstring{Adaptation of
\ac{FFD}}{Adaptation of FFD}}\label{adaption-of}
As we have established in Chapter \ref{sec:back:ffd} we can define an
\ac{FFD}--displacement as
\begin{equation}
\Delta_x(u) = \sum_i N_{i,d,\tau_i}(u) \Delta_x c_i
\end{equation}
Note that we only sum up the \(\Delta\)--displacements in the control
points \(c_i\) to get the change in position of the point we are
interested in.

In this way every deformed vertex is defined by \[
\textrm{Deform}(v_x) = v_x + \Delta_x(u)
\] with \(u \in [0..1[\) being the variable that connects the
high--detailed vertex--mesh to the low--detailed control--grid. To
actually calculate the new position of the vertex we first have to
calculate the \(u\)--value for each vertex. This is achieved by finding
out the parametrization of \(v\) in terms of \(c_i\) \[
v_x \overset{!}{=} \sum_i N_{i,d,\tau_i}(u) c_i
\] so we can minimize the error between those two: \[
\underset{u}{\argmin}\,Err(u,v_x) = \underset{u}{\argmin}\,2 \cdot \|v_x - \sum_i N_{i,d,\tau_i}(u) c_i\|^2_2
\] As this error--term is quadratic we take the derivative with respect
to \(u\), yielding \[
\begin{array}{rl}
\frac{\partial}{\partial u} & v_x - \sum_i N_{i,d,\tau_i}(u) c_i \\
= & - \sum_i \left( \frac{d}{\tau_{i+d} - \tau_i} N_{i,d-1,\tau}(u) - \frac{d}{\tau_{i+d+1} - \tau_{i+1}} N_{i+1,d-1,\tau}(u) \right) c_i
\end{array}
\] and do a gradient--descent to approximate the value of \(u\) up to an
\(\epsilon\) of \(0.0001\).

For this we use the Gauss--Newton algorithm\cite{gaussNewton}, as the
solution to this problem may not be deterministic, because we usually
have far more vertices than control points (\(\#v~\gg~\#c\)).
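
To sketch this parametrization step, the following Python snippet finds
\(u\) for a single vertex coordinate by iterating Gauss--Newton updates
on the residual; it reuses the illustrative \texttt{N} from Chapter
\ref{sec:back:ffd} and is not the actual implementation.
\begin{verbatim}
# Illustrative 1D parameter fit: find u with s(u) ~ v_x.
# N is the basis-function sketch from the background chapter.
def dN(i, d, u, tau):
    """Derivative of the i-th basis function (formula in sec:back:ffd)."""
    left = (d / (tau[i + d] - tau[i]) * N(i, d - 1, u, tau)
            if tau[i + d] != tau[i] else 0.0)
    right = (d / (tau[i + d + 1] - tau[i + 1]) * N(i + 1, d - 1, u, tau)
             if tau[i + d + 1] != tau[i + 1] else 0.0)
    return left - right

def find_u(v_x, cs, d, tau, u=0.5, eps=1e-4):
    for _ in range(100):
        r = v_x - sum(N(i, d, u, tau) * c for i, c in enumerate(cs))  # residual
        J = -sum(dN(i, d, u, tau) * c for i, c in enumerate(cs))      # dr/du
        if abs(r) < eps or J == 0.0:
            break
        u -= r / J                                  # Gauss-Newton update in 1D
    return u
\end{verbatim}
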
\section{\texorpdfstring{Adaptation of \ac{FFD} for a
3D--Mesh}{Adaptation of FFD for a 3D--Mesh}}\label{adaption-of-for-a-3dmesh}
\label{3dffd}
This is a straightforward extension of the 1D--method presented in the
previous section. But this time things get a bit more complicated. As we
have a 3--dimensional grid we may have a different number of
control--points in each direction.

Given \(n,m,o\) control points in \(x,y,z\)--direction, each point in
the volume is defined by
\[V(u,v,w) = \sum_i \sum_j \sum_k N_{i,d,\tau_i}(u) N_{j,d,\tau_j}(v) N_{k,d,\tau_k}(w) \cdot C_{ijk}.\]
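
A direct (if naive) way to read this triple sum is the following sketch,
which reuses the illustrative \texttt{N} from Chapter \ref{sec:back:ffd}
and assumes the control points \texttt{C[i][j][k]} are 3D numpy vectors.
\begin{verbatim}
import numpy as np

def V(u, v, w, C, d, tau_u, tau_v, tau_w):
    """Trivariate tensor-product evaluation of V(u, v, w)."""
    # N is the basis-function sketch from the background chapter.
    out = np.zeros(3)
    for i in range(len(C)):
        for j in range(len(C[0])):
            for k in range(len(C[0][0])):
                out += (N(i, d, u, tau_u) * N(j, d, v, tau_v)
                        * N(k, d, w, tau_w) * np.asarray(C[i][j][k]))
    return out
\end{verbatim}
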
In this case we have three different B--Splines (one for each dimension)
and also 3 variables \(u,v,w\) for each vertex we want to approximate.

Given a target vertex \(\vec{p}^*\) and an initial guess
\(\vec{p}=V(u,v,w)\) we define the error--function for the
gradient--descent as:
\[Err(u,v,w,\vec{p}^{*}) = \vec{p}^{*} - V(u,v,w)\]
And the partial version for just one direction as
\[Err_x(u,v,w,\vec{p}^{*}) = p^{*}_x - \sum_i \sum_j \sum_k N_{i,d,\tau_i}(u) N_{j,d,\tau_j}(v) N_{k,d,\tau_k}(w) \cdot {c_{ijk}}_x \]
To solve this we derive partially, like before:
\[
\begin{array}{rl}
\displaystyle \frac{\partial Err_x}{\partial u} = & \displaystyle \frac{\partial}{\partial u} \left( p^{*}_x - \sum_i \sum_j \sum_k N_{i,d,\tau_i}(u) N_{j,d,\tau_j}(v) N_{k,d,\tau_k}(w) \cdot {c_{ijk}}_x \right) \\
= & \displaystyle - \sum_i \sum_j \sum_k N'_{i,d,\tau_i}(u) N_{j,d,\tau_j}(v) N_{k,d,\tau_k}(w) \cdot {c_{ijk}}_x
\end{array}
\]
The other partial derivatives follow the same pattern yielding the
Jacobian:
\[
J(Err(u,v,w)) =
\left(
\begin{array}{ccc}
\frac{\partial Err_x}{\partial u} & \frac{\partial Err_x}{\partial v} & \frac{\partial Err_x}{\partial w} \\
\frac{\partial Err_y}{\partial u} & \frac{\partial Err_y}{\partial v} & \frac{\partial Err_y}{\partial w} \\
\frac{\partial Err_z}{\partial u} & \frac{\partial Err_z}{\partial v} & \frac{\partial Err_z}{\partial w}
\end{array}
\right)
\] \[
\scriptsize
=
\left(
\begin{array}{ccc}
- \displaystyle \sum_{i,j,k} N'_{i}(u) N_{j}(v) N_{k}(w) \cdot {c_{ijk}}_x &- \displaystyle \sum_{i,j,k} N_{i}(u) N'_{j}(v) N_{k}(w) \cdot {c_{ijk}}_x & - \displaystyle \sum_{i,j,k} N_{i}(u) N_{j}(v) N'_{k}(w) \cdot {c_{ijk}}_x \\
- \displaystyle \sum_{i,j,k} N'_{i}(u) N_{j}(v) N_{k}(w) \cdot {c_{ijk}}_y &- \displaystyle \sum_{i,j,k} N_{i}(u) N'_{j}(v) N_{k}(w) \cdot {c_{ijk}}_y & - \displaystyle \sum_{i,j,k} N_{i}(u) N_{j}(v) N'_{k}(w) \cdot {c_{ijk}}_y \\
- \displaystyle \sum_{i,j,k} N'_{i}(u) N_{j}(v) N_{k}(w) \cdot {c_{ijk}}_z &- \displaystyle \sum_{i,j,k} N_{i}(u) N'_{j}(v) N_{k}(w) \cdot {c_{ijk}}_z & - \displaystyle \sum_{i,j,k} N_{i}(u) N_{j}(v) N'_{k}(w) \cdot {c_{ijk}}_z
\end{array}
\right)
\]
With the Gauss--Newton algorithm we iterate via the formula
\[J(Err(u,v,w)) \cdot \Delta \left( \begin{array}{c} u \\ v \\ w \end{array} \right) = -Err(u,v,w)\]
and use Cramer's rule for inverting the small Jacobian and solving this
system of linear equations.
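
As an illustration of this step, a single Gauss--Newton update solving
the \(3 \times 3\) system via Cramer's rule could look like the
following sketch; \texttt{J} and \texttt{err} stand for
\(J(Err(u,v,w))\) and \(Err(u,v,w)\) and are assumed to be provided by
the trivariate spline code.
\begin{verbatim}
import numpy as np

def gauss_newton_step(uvw, J, err):
    """Solve J * delta = -err via Cramer's rule and update (u, v, w)."""
    det = np.linalg.det(J)
    if abs(det) < 1e-12:
        return uvw                    # degenerate Jacobian: keep the old guess
    delta = np.empty(3)
    for k in range(3):
        Jk = J.copy()
        Jk[:, k] = -err               # replace k-th column by the right-hand side
        delta[k] = np.linalg.det(Jk) / det
    return uvw + delta
\end{verbatim}
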
\section{Is the parametrization sensible?}\label{parametrisierung-sinnvoll}
\begin{itemize}
\tightlist
\item
Disadvantages of the parametrization
\item
Deformation around a control point is much more direct to control.
\item
=\textgreater{} DM--FFD?
\end{itemize}
\chapter{\texorpdfstring{Scenarios for testing evolvability criteria
using
\acf{FFD}}{Scenarios for testing evolvability criteria using FFD}}\label{scenarios-for-testing-evolvability-criteria-using}

\label{sec:eval}
\section{Test Scenario: 1D Function
Approximation}\label{test-scenario-1d-function-approximation}
\subsection{Optimization Scenario}\label{optimierungszenario}
\begin{itemize}
\tightlist
\item
Plane -\textgreater{} template--fit
\end{itemize}
\subsection{Matching in 1D}\label{matching-in-1d}
\begin{itemize}
\tightlist
\item
Trivial
\end{itemize}
\subsection{Special aspects of the
evaluation}\label{besonderheiten-der-auswertung}
\begin{itemize}
\tightlist
\item
The analytic solution is the single best one
\item
Is the result constant even under noise?
\item
Add a normalized 1--vector to the gradient
\begin{itemize}
\tightlist
\item
a cone emerges
\end{itemize}
\end{itemize}
\section{Test Scenario: 3D Function
Approximation}\label{test-scenario-3d-function-approximation}

\subsection{Optimization Scenario}\label{optimierungsszenario}
\begin{itemize}
\tightlist
\item
Ball to Mario
\end{itemize}
\subsection{Matching in 3D}\label{matching-in-3d}
\begin{itemize}
\tightlist
\item
alternating optimization
\end{itemize}
\subsection{Special aspects of the
optimization}\label{besonderheiten-der-optimierung}
\begin{itemize}
\tightlist
\item
The analytic solution is only valid up to the optimization of the first points
\item
The criteria are still good
\end{itemize}
\chapter{Evaluation of Scenarios}\label{evaluation-of-scenarios}

\label{sec:res}

\section{Spearman/Pearson--Metrics}\label{spearmanpearsonmetriken}
\begin{itemize}
\tightlist
\item
What is this?
\item
Why should we care about it?
\item
Why is monotonicity sufficient?
\item
Did we show this?
\item
Statistics, pictures, blah!
\end{itemize}
\section{Results of 1D Function
Approximation}\label{results-of-1d-function-approximation}
\begin{figure}[!ht]
\includegraphics[width=\textwidth]{img/evolution1d/20170830-evolution1D_5x5_100Times-all_appended.png}
\caption{Results of the 1D function approximation}
\end{figure}
\section{Results of 3D Function
Approximation}\label{results-of-3d-function-approximation}
\begin{figure}[!ht]
\includegraphics[width=\textwidth]{img/evolution3d/20171007_3dFit_all_append.png}
\caption{Results of the 3D function approximation}
\end{figure}
\chapter{Conclusion}\label{schluss}

\label{sec:dis}

HAHA .. as if -.-
% \backmatter
\cleardoublepage
\renewcommand\thechapter{\Alph{chapter}}
\chapter*{Appendix}
\addcontentsline{toc}{chapter}{\protect\numberline{}Appendix}
\addtocontents{toc}{\protect\setcounter{tocdepth}{1}}
\setcounter{chapter}{0} % reset chapter counter so the appendix chapters are lettered
\pagenumbering{roman}
%%%%%%%%%%%%%%% Bibliography %%%%%%%%%%%%%%%
\bibliographystyle{unsrtdin} % \bibliographystyle{natdin}
\bibliography{bibma}
% \addcontentsline{toc}{chapter}{\protect\numberline{\thechapter}Bibliography} % add the bibliography to the table of contents
% \addtocounter{chapter}{1}
\newpage
%%%%%%%%%%%%%%% Appendix %%%%%%%%%%%%%%%
% \clearpage % put everything back in later
% % \input{files/appendix}
\input{settings/abkuerzungen}
% \addcontentsline{toc}{chapter}{\protect\numberline{\thechapter}Abbreviations}
% \addtocounter{chapter}{1}
\newpage
% \listofalgorithms
% \addcontentsline{toc}{section}{\protect\numberline{\thesection}List of Algorithms}
% \addtocounter{section}{1}
% \newpage
%
\listoffigures
% \addcontentsline{toc}{chapter}{\protect\numberline{\thechapter}List of Figures}
% \addtocounter{chapter}{1}
\newpage
% \listoftables
\listoftodos
% \addcontentsline{toc}{chapter}{\protect\numberline{\thechapter}TODOs}
% \addtocounter{chapter}{1}
\newpage
% \printindex
%%%%%%%%%%%%%%% Declaration %%%%%%%%%%%%%%%
% *\input{settings/declaration}
\include{files/erklaerung}
\end{document}