diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..028fb2d
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,9 @@
+[submodule "presentation/template/mathjax"]
+	path = presentation/template/mathjax
+	url = https://github.com/mathjax/MathJax.git
+[submodule "presentation/template/revealjs"]
+	path = presentation/template/revealjs
+	url = https://github.com/hakimel/reveal.js.git
+[submodule "presentation/template/font-awesome"]
+	path = presentation/template/font-awesome
+	url = https://github.com/FortAwesome/Font-Awesome.git
diff --git a/presentation/Makefile b/presentation/Makefile
new file mode 100755
index 0000000..f401643
--- /dev/null
+++ b/presentation/Makefile
@@ -0,0 +1,43 @@
+### INPUT & OUTPUT ##########################################################
+
+MD   := $(wildcard *.md)
+HTML := $(patsubst %.md, %.html, $(MD))
+
+### TEMPLATE CONFIG #########################################################
+
+TDIR     := ./template
+TEMPLATE := $(TDIR)/template.html
+
+### EXPLICIT RULES ##########################################################
+
+.PHONY: html clean
+
+html: $(HTML)
+
+# rebuild when the template changes; the %.md prerequisite comes from the
+# pattern rule below
+$(HTML): $(TEMPLATE)
+
+clean:
+	rm -f $(HTML)
+
+### IMPLICIT RULES ##########################################################
+
+# new syntax with filters
+%.html: %.md
+	cat $< | \
+	pandoc \
+		--from markdown+link_attributes+smart+line_blocks+emoji \
+		--to revealjs \
+		--section-divs \
+		--no-highlight \
+		--mathjax \
+		--template $(TEMPLATE) \
+		--variable template=$(TDIR) \
+		--variable chalkboard=${<:.md=.json} \
+		--filter styling \
+		--filter cols \
+		--filter media \
+		--filter clean \
+		-o $@
diff --git a/presentation/presentation.html b/presentation/presentation.html
new file mode 100644
index 0000000..8ebc515
--- /dev/null
+++ b/presentation/presentation.html
@@ -0,0 +1,594 @@
+[HTML head elided — title: "Evaluation of the Performance of Randomized FFD Control Grids: Master Thesis"; stylesheet and script includes were stripped during extraction]
+
+
Evaluation of the Performance of Randomized FFD Control Grids
+
Master Thesis
+
Stefan Dresselhaus
+
Graphics & Geometry Group
+
+

Introduction

+

Many modern industrial design processes require advanced optimization methods due to the increased complexity that results from more and more degrees of freedom as methods are refined and/or combined. Examples of this are physical domains like aerodynamics (e.g. drag) or fluid dynamics (e.g. throughput of liquid) — where the complexity increases with the temporal and spatial resolution of the simulation — or known hard algorithmic problems in computer science (e.g. layouting of circuit boards or stacking of 3D–objects). Moreover, these are typically not static environments: requirements shift over time or from case to case.

[Figure: Example of the use of evolutionary algorithms in automotive design.]

Evolutionary algorithms cope especially well with these problem domains while addressing all the issues at hand. One of the main concerns with these algorithms is the formulation of the problem in terms of a genome and a fitness–function. While one can typically use an arbitrary cost–function as the fitness–function (e.g. amount of drag, amount of space, etc.), the translation of the problem–domain into a simple parametric representation (the genome) can be challenging.

+

This translation is often necessary as the target of the optimization may have too many degrees of freedom for a reasonable computation. In the example of an aerodynamic simulation of drag on an object, such object–designs tend to have a high number of vertices to adhere to various requirements (visual, practical, physical, etc.). A simpler representation of the same object in only a few parameters that manipulate the whole in a sensible manner is desirable, as this often decreases the computation time significantly.

+

Additionally one can exploit the fact that drag in this case is especially sensitive to non–smooth surfaces, so that a smooth local manipulation of the surface as a whole is more advantageous than a merely random manipulation of the vertices.

+

The quality of such a low–dimensional representation is, in biological evolution, strongly tied to the notion of evolvability, as the parametrization of the problem has serious implications for the convergence speed and the quality of the solution. However, there is no consensus on how evolvability is defined, and its meaning varies from context to context. As a consequence there is a need for criteria we can measure, so that we are able to compare different representations and learn and improve upon them.

[Figure: Example of RBF–based deformation and FFD targeting the same mesh.]

One example of such a general representation of an object is to generate random points and represent the vertices of an object as distances to these points — for example via RBF. If one (or the algorithm) moves such a point, the object gets deformed only locally (due to the locality of the RBF). As this results in a simple mapping from the parameter–space onto the object, one can try out different representations of the same object and evaluate which criteria may be suited to describe this notion of evolvability. This is exactly what Richter et al. have done.

+

As we transfer the results of Richter et al. from using RBF as a representation to manipulate geometric objects to the use of FFD, we will use the same definition of evolvability the original author used, namely regularity, variability, and improvement potential. We introduce these terms in detail in Chapter . In the original publication the author could show a correlation between these evolvability–criteria and the quality and convergence speed of such an optimization.

+

We will replicate the same setup on the same objects, but use FFD instead of RBF to create a local deformation near the control–points, and evaluate whether the evolvability–criteria still work as a predictor for evolvability of the representation given the different deformation scheme, as suspected in .

+

First we introduce different topics in isolation in Chapter . We take an abstract look at the definition of FFD for a one–dimensional line (in ) and discuss why this is a sensible deformation function (in ). Then we establish some background–knowledge of evolutionary algorithms (in ) and why these are useful in our domain (in ), followed by the definition of the different evolvability–criteria established in (in ).

+

In Chapter we take a look at our implementation of FFD and the adaptation for 3D–meshes. Next, in Chapter , we describe the different scenarios we use to evaluate the different evolvability–criteria, incorporating all aspects introduced in Chapter . Following that, we evaluate the results in Chapter , with further discussion, summary and outlook in Chapter .

+
+
+

Background

+

What is FFD?


First of all we have to establish how an FFD works and why it is a good tool for deforming geometric objects (especially meshes in our case) in the first place. For simplicity we only summarize the 1D–case here and go into the extension to the 3D–case in chapter .

+

The main idea of FFD is to create a function \(s : [0,1[^d \mapsto \mathbb{R}^d\) that spans a certain part of a vector–space and is only linearly parametrized by some special control–points \(p_i\) and a constant attribution–function \(a_i(u)\), so \[
s(\vec{u}) = \sum_i a_i(\vec{u}) \vec{p_i}
\] can be thought of as a representation of the inside of the convex hull generated by the control–points, where each position inside can be accessed by the right \(u \in [0,1[^d\).


In the 1–dimensional example in figure , the control–points are indicated as red dots and the colour–gradient should hint at the \(u\)–values ranging from \(0\) to \(1\).

+

We now define a B–Spline by the following:
Given an arbitrary number of points \(p_i\) along a line, we map a scalar value \(\tau_i \in [0,1[\) to each point with \(\tau_i < \tau_{i+1}\,\forall i\) according to the position of \(p_i\) on said line. Additionally, given a degree of the target polynomial \(d\), we define the basis–function \(N_{i,d,\tau_i}(u)\) as follows:

+\[\begin{equation} \label{eqn:ffd1d1} +N_{i,0,\tau}(u) = \begin{cases} 1, & u \in [\tau_i, \tau_{i+1}[ \\ 0, & \mbox{otherwise} \end{cases} +\end{equation}\] +

and \[\begin{equation} \label{eqn:ffd1d2}
N_{i,d,\tau}(u) = \frac{u-\tau_i}{\tau_{i+d}-\tau_i} N_{i,d-1,\tau}(u) + \frac{\tau_{i+d+1} - u}{\tau_{i+d+1}-\tau_{i+1}} N_{i+1,d-1,\tau}(u)
\end{equation}\]
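To make the recursion concrete, the following is a minimal sketch of how these basis–functions could be evaluated; it is an illustration, not the thesis' actual implementation, and `tau` is assumed to be the full knot–vector.

```python
def N(i, d, u, tau):
    """Cox-de-Boor recursion for the B-Spline basis function N_{i,d,tau}(u)."""
    if d == 0:
        # indicator function of the knot interval [tau_i, tau_{i+1})
        return 1.0 if tau[i] <= u < tau[i + 1] else 0.0
    left = right = 0.0
    # skip a summand if its knot span vanishes: the basis term is 0 anyway
    if tau[i + d] > tau[i]:
        left = (u - tau[i]) / (tau[i + d] - tau[i]) * N(i, d - 1, u, tau)
    if tau[i + d + 1] > tau[i + 1]:
        right = ((tau[i + d + 1] - u) / (tau[i + d + 1] - tau[i + 1])
                 * N(i + 1, d - 1, u, tau))
    return left + right
```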

+

If we now multiply every \(p_i\) with the corresponding \(N_{i,d,\tau_i}(u)\) we get the contribution of each point \(p_i\) to the final curve–point parametrized only by \(u \in [0,1[\). As can be seen from , we only access the points \([p_i..p_{i+d}]\) for any given \(i\)1, which gives us, in combination with choosing \(p_i\) and \(\tau_i\) in order, only a local influence of \(d+1\) points.

+

We can even derive this equation straightforwardly for an arbitrary \(N\)2:

+

\[\frac{\partial}{\partial u} N_{i,d,\tau}(u) = \frac{d}{\tau_{i+d} - \tau_i} N_{i,d-1,\tau}(u) - \frac{d}{\tau_{i+d+1} - \tau_{i+1}} N_{i+1,d-1,\tau}(u)\]

+

For a B–Spline \[s(u) = \sum_{i} N_{i,d,\tau_i}(u) p_i\] these derivatives yield \(\left(\frac{\partial}{\partial u}\right)^{d+1} s(u) = 0\), as \(s(u)\) is a piecewise polynomial of degree \(d\).

+

Another interesting property of these recursive polynomials is that they are continuous (given \(d \ge 1\)), as every \(p_i\) gets blended in between \(\tau_i\) and \(\tau_{i+d}\) and blended out between \(\tau_{i+1}\) and \(\tau_{i+d+1}\), as can be seen from the two coefficients in every step of the recursion.

+

This means that all changes are only a local linear combination of the control–points \(p_i\) to \(p_{i+d+1}\), and consequently this yields the convex–hull–property of B–Splines — meaning that, no matter how we choose our coefficients, the resulting points all have to lie inside the convex–hull of the control–points.

+

For a given point \(s_i\) we can then calculate the contributions \(u_{i,j}~:=~N_{j,d,\tau}(u_i)\) of each control–point \(p_j\) to get the projection from the control–point–space into the object–space: \[
s_i = \sum_j u_{i,j} \cdot p_j = \vec{n}_i^{T} \vec{p}
\] or, written for all points at the same time: \[
\vec{s} = \vec{U} \vec{p}
\] where \(\vec{U}\) is the \(n \times m\) transformation–matrix (later on called deformation–matrix) for \(n\) object–space–points and \(m\) control–points.
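Assembling \(\vec{U}\) then amounts to evaluating the basis–functions once per vertex and control–point; a sketch reusing the `N` from above (function and parameter names are illustrative):

```python
import numpy as np

def deformation_matrix(us, m, d, tau):
    """n x m matrix U with U[i, j] = N_{j,d,tau}(u_i) for parameter values us."""
    U = np.zeros((len(us), m))
    for i, u in enumerate(us):
        for j in range(m):
            U[i, j] = N(j, d, u, tau)
    return U

# the deformed positions then follow linearly: s = U @ p
```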


Furthermore the B–Spline–basis–functions form a partition of unity for all but the first and last \(d\) control–points. Therefore we later on use the border–points \(d+1\) times, such that \(\sum_j u_{i,j} p_j = p_i\) holds for these points.

+

The locality of the influence of each control–point and the partition of unity were beautifully pictured by Brunet, which we include here as figure .

+
+

Why is FFD a good deformation function?


The usage of FFD as a tool for manipulating meshes follows directly from the properties of the polynomials and the correspondence to the control–points. Having only a few control–points gives the user a nicer high–level interface, as she only needs to move these points and the model follows in an intuitive manner. The deformation is smooth, as the underlying polynomial is smooth as well, and affects as many vertices of the model as needed. Moreover the changes are always local, so one does not risk a change that the user cannot immediately see.

+

But there are also disadvantages to this approach. The user loses the ability to directly influence vertices, and even seemingly simple tasks such as creating a plateau can be difficult to achieve.

+

These disadvantages led to the formulation of direct manipulation, in which the user directly interacts with the surface–mesh. All interactions are applied proportionally to the control–points that make up the parametrization of the interaction–point itself, yielding a smooth deformation of the surface at the surface–point without seemingly arbitrarily scattered control–points. Moreover this increases the efficiency of an evolutionary optimization, which we will use later on.


But this approach also has downsides, as can be seen in figure : the tessellation of the invisible grid has a major impact on the deformation itself.

+

All in all, FFD and direct manipulation are still good ways to deform a high–polygon mesh despite these downsides.

+
+
+
+

What is evolutionary optimization?


In this thesis we are using an evolutionary optimization strategy to solve the problem of finding the best parameters for our deformation. This approach, however, is very generic and we introduce it here in a broader sense.


The general shape of an evolutionary algorithm (adapted from ) is outlined in Algorithm . Here, \(P(t)\) denotes the population of parameters in step \(t\) of the algorithm. The population contains \(\mu\) individuals \(a_i\) from the possible individual–set \(I\) that fit the shape of the parameters we are looking for. Typically these are initialized by a random guess or just zero. Furthermore we need a so–called fitness–function \(\Phi : I \mapsto M\) that maps each individual into a measurable space \(M\) (usually \(M = \mathbb{R}\)), along with a convergence–function \(c : I \mapsto \mathbb{B}\) that terminates the optimization.

+

Biologically speaking, the set \(I\) corresponds to the set of possible genotypes while \(M\) represents the possible observable phenotypes. Genotypes define all initial properties of an individual, but their properties are not directly observable. It is the genes that evolve over time (and thus correspond to the parameters we are tweaking in our algorithms or the genes in nature), but only the phenotypes make certain behaviour observable (algorithmically through our fitness–function, biologically by the ability to survive and produce offspring). Any individual in our algorithm thus experiences a biologically motivated life cycle: inheriting genes from the parents, modified by occurring mutations, performing according to a fitness–metric, and generating offspring based on this. Therefore each iteration of the while–loop above is also often called a generation.

+

One should note that there is a subtle difference between the fitness–function and the so–called genotype–phenotype–mapping. The former directly applies the genotype–phenotype–mapping and evaluates the performance of an individual, thus going directly from genes/parameters to a reproduction–probability/score. In a concrete example the genotype can be an arbitrary vector (the genes), the phenotype is then a deformed object, and the performance can be a single measurement like an air–drag–coefficient. The genotype–phenotype–mapping would then just be the generation of different objects from that starting–vector, whereas the fitness–function goes directly from such a starting–vector to the coefficient that we want to optimize.

+

The main algorithm just repeats the following steps (a minimal sketch follows the list):

+
• Recombine with a recombination–function \(r : I^{\mu} \mapsto I^{\lambda}\) to generate \(\lambda\) new individuals based on the characteristics of the \(\mu\) parents. This makes sure that the next guess is close to the old guess.
• Mutate with a mutation–function \(m : I^{\lambda} \mapsto I^{\lambda}\) to introduce new effects that cannot be produced by mere recombination of the parents. Typically this just adds minor defects to individual members of the population, like adding random Gaussian noise or amplifying/dampening random parts.
• Select with a selection–function \(s : (I^\lambda \cup I^{\mu + \lambda},\Phi) \mapsto I^\mu\) that selects from the previously generated \(I^\lambda\) children and optionally also the parents (denoted by the set \(Q\) in the algorithm) using the fitness–function \(\Phi\). The result of this operation is the next population of \(\mu\) individuals.
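A minimal, schematic sketch of this loop; the operator functions are placeholders for the recombination, mutation and selection described above, not a concrete implementation:

```python
def evolve(parents, recombine, mutate, select, fitness, converged):
    """Generic (mu + lambda) evolutionary loop."""
    population = parents                    # mu initial individuals
    generation = 0
    while not converged(population):
        children = recombine(population)    # lambda new individuals
        children = mutate(children)
        # next mu individuals, chosen from children and (optionally) parents
        population = select(population + children, fitness)
        generation += 1
    return population
```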

All these functions can (and mostly do) have a lot of hidden parameters that can be changed over time. A good overview of this is given in , so we only give a small excerpt here.

+

For example, the mutation can consist of merely a single \(\sigma\) determining the strength of the Gaussian defects in every parameter — or a different \(\sigma\) for every component of those parameters. An even more sophisticated example would be the 1/5 success rule from .

+

Also in the selection–function it may not be wise to only take the best–performing individuals, because it may be that the optimization has to overcome a barrier of bad fitness to achieve a better local optimum.

+

Recombination also does not have to be mere random choosing of parents, but can also take ancestry, distance of genes or groups of individuals into account.

+
+
+

Advantages of evolutionary algorithms

+ +

The main advantage of evolutionary algorithms is the ability to find optima of general functions just with the help of a given fitness–function. Components and techniques for evolutionary algorithms are specifically known to help with different problems arising in the domain of optimization. An overview of the typical problems is shown in figure .


Most of the advantages stem from the fact that a gradient–based procedure usually has only one point of observation from which it evaluates the next steps, whereas an evolutionary strategy starts with a population of guessed solutions. Because an evolutionary strategy can be modified according to the problem–domain (i.e. by the ideas given above) it can also approximate very difficult problems in an efficient manner, and even self–tune parameters depending on the ancestry at runtime3.

+

If an analytic best solution exists and is easily computable (i.e. because the error–function is convex) an evolutionary algorithm is not the right choice. Although both converge to the same solution, the analytic one is usually faster.

+

But in reality many problems have no analytic solution, because the problem is either not convex or there are so many parameters that an analytic solution (mostly meaning the equivalence to an exhaustive search) is computationally not feasible. Here evolutionary optimization has one more advantage: one can at least get suboptimal solutions fast, which then refine over time and still converge to a decent solution much faster than an exhaustive search.

+
+
+

Criteria for the evolvability of linear deformations


As we have established in chapter , we can describe a deformation by the formula \[
\vec{S} = \vec{U}\vec{P}
\] where \(\vec{S}\) is an \(n \times d\) matrix of vertices4, \(\vec{U}\) are the deformation–coefficients calculated during parametrization, and \(\vec{P}\) is an \(m \times d\) matrix of control–points that we interact with during deformation.

+

We can also think of the deformation in terms of differences from the original coordinates, \[
\Delta \vec{S} = \vec{U} \cdot \Delta \vec{P},
\] which is isomorphic to the former due to the linearity of the deformation. Seen this way, the behaviour of the deformation lies solely in the entries of \(\vec{U}\), which is why the three criteria focus on them.

+
+

Variability

+

In , variability is defined as \[\mathrm{variability}(\vec{U}) := \frac{\mathrm{rank}(\vec{U})}{n},\] whereby \(\vec{U}\) is the \(n \times m\) deformation–matrix used to map the \(m\) control–points onto the \(n\) vertices.
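In code this is essentially a one–liner; a sketch using numpy's rank computation:

```python
import numpy as np

def variability(U):
    """variability(U) = rank(U) / n for an n x m deformation matrix U."""
    return np.linalg.matrix_rank(U) / U.shape[0]
```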

+

Given \(n = m\), an identical number of control–points and vertices, this quotient will be \(=1\) if all control–points are independent of each other and the solution is to trivially move every control–point onto a target–point.

+

In practice the value of \(V(\vec{U})\) is typically \(\ll 1\), because there are only few control–points for many vertices, so \(m \ll n\).

+

This criterion should correlate to the degrees of freedom the given parametrization has. This can be seen from the fact, that \(\mathrm{rank}(\vec{U})\) is limited by \(\min(m,n)\) and — as \(n\) is constant — can never exceed \(n\).

+

The rank itself is also interesting, as control–points could theoretically be placed on top of each other or be linearly dependent in another way — but both cases lower the rank below the number of control–points \(m\) and are thus measurable by the variability.

+
+
+

Regularity

+

Regularity is defined as \[\mathrm{regularity}(\vec{U}) := \frac{1}{\kappa(\vec{U})} = \frac{\sigma_{min}}{\sigma_{max}}\] where \(\sigma_{min}\) and \(\sigma_{max}\) are the smallest and greatest right singular value of the deformation–matrix \(\vec{U}\).
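A sketch of this computation via the singular value decomposition:

```python
import numpy as np

def regularity(U):
    """regularity(U) = 1/kappa(U) = sigma_min / sigma_max."""
    sigma = np.linalg.svd(U, compute_uv=False)  # singular values, descending
    return sigma[-1] / sigma[0]
```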

+

As we deform the given object based only on the parameters as \(\vec{p} \mapsto f(\vec{x} + \vec{U}\vec{p})\), this makes sure that \(\|\vec{U}\vec{p}\| \propto \|\vec{p}\|\) when \(\kappa(\vec{U}) \approx 1\). The inversion of \(\kappa(\vec{U})\) is only performed to map the criterion–range to \([0..1]\), where \(1\) is the optimal value and \(0\) is the worst.

+

On the one hand this criterion should be characteristic for numeric stability and on the other hand for the convergence speed of evolutionary algorithms as it is tied to the notion of locality.

+
+
+

Improvement Potential

+

In contrast to the general nature of variability and regularity, which are agnostic of the fitness–function at hand, the third criterion should reflect a notion of the potential for optimization, taking a guess into account.

+

Most of the time some kind of gradient \(g\) is available to suggest a direction worth pursuing, either from a previous iteration or by educated guessing. We use this to guess how much change can be achieved in the given direction.

+

The definition for an improvement potential \(P\) is: \[ +\mathrm{potential}(\vec{U}) := 1 - \|(\vec{1} - \vec{UU}^+)\vec{G}\|^2_F +\] given some approximate \(n \times d\) fitness–gradient \(\vec{G}\), normalized to \(\|\vec{G}\|_F = 1\), whereby \(\|\cdot\|_F\) denotes the Frobenius–Norm.
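A sketch of this criterion using the Moore–Penrose pseudo–inverse (numpy names, illustrative only):

```python
import numpy as np

def improvement_potential(U, G):
    """potential(U) = 1 - ||(I - U U^+) G||_F^2 for a fitness gradient G."""
    G = G / np.linalg.norm(G)                   # normalize to ||G||_F = 1
    residual = G - U @ (np.linalg.pinv(U) @ G)  # (I - U U^+) G without forming I
    return 1.0 - np.linalg.norm(residual) ** 2  # squared Frobenius norm
```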

+
+
+
+
+

Implementation of FFD


The general formulation of B–Splines has two free parameters \(d\) and \(\tau\) which must be chosen beforehand.

+

As we usually work with regular grids in our FFD, we define \(\tau\) statically as \(\tau_i = \frac{i}{n}\), whereby \(n\) is the number of control–points in that direction.

+

\(d\) defines the degree of the B–Spline–function (a B–Spline of degree \(d\) is \(d-1\) times continuously differentiable). For our purposes we fix \(d\) to \(3\), but give the formulas for the general case so they can be adapted quite freely.

+
+

Adaption of FFD


As we have established in Chapter , we can define an FFD–displacement as \[\begin{equation}
\Delta_x(u) = \sum_i N_{i,d,\tau_i}(u) \Delta_x c_i
\end{equation}\]

+

Note that we only sum up the \(\Delta\)–displacements in the control–points \(c_i\) to get the change in position of the point we are interested in.

+

In this way every deformed vertex is defined by \[
\textrm{Deform}(v_x) = v_x + \Delta_x(u)
\] with \(u \in [0..1[\) being the variable that connects the high–detailed vertex–mesh to the low–detailed control–grid. To actually calculate the new position of the vertex we first have to calculate the \(u\)–value for each vertex. This is achieved by finding the parametrization of \(v\) in terms of \(c_i\), \[
v_x \overset{!}{=} \sum_i N_{i,d,\tau_i}(u) c_i,
\] so we can minimize the error between those two: \[
\underset{u}{\argmin}\,Err(u,v_x) = \underset{u}{\argmin}\,\|v_x - \sum_i N_{i,d,\tau_i}(u) c_i\|^2_2.
\] As this error–term is quadratic we simply differentiate with respect to \(u\), yielding \[
\frac{\partial}{\partial u} \left( v_x - \sum_i N_{i,d,\tau_i}(u) c_i \right)
= - \sum_i \left( \frac{d}{\tau_{i+d} - \tau_i} N_{i,d-1,\tau}(u) - \frac{d}{\tau_{i+d+1} - \tau_{i+1}} N_{i+1,d-1,\tau}(u) \right) c_i
\] and do a gradient–descent to approximate the value of \(u\) up to an \(\epsilon\) of \(0.0001\).
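A sketch of this gradient–descent; the step size, the starting value, and the helper `dN` (implementing the derivative formula from above) are illustrative assumptions, not the thesis' actual code:

```python
def dN(i, d, u, tau):
    """Derivative of N_{i,d,tau}(u), as derived earlier (distinct knots assumed)."""
    return (d / (tau[i + d] - tau[i]) * N(i, d - 1, u, tau)
            - d / (tau[i + d + 1] - tau[i + 1]) * N(i + 1, d - 1, u, tau))

def find_u(vx, c, d, tau, eps=1e-4, step=0.1):
    """Approximate the u whose spline value sum_i N_{i,d,tau}(u) c_i is closest to vx."""
    u = 0.5
    while True:
        s  = sum(N(i, d, u, tau) * ci for i, ci in enumerate(c))
        ds = sum(dN(i, d, u, tau) * ci for i, ci in enumerate(c))
        grad = -2.0 * (vx - s) * ds        # d/du of the squared error
        if abs(grad) < eps:
            return u
        u -= step * grad
```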

+

For this we employ the Gauss–Newton algorithm, which converges to the least–squares solution. An exact solution of this problem is impossible most of the time, because we usually have far more vertices than control–points (\(\#v~\gg~\#c\)).

+
+
+

Adaption of FFD for a 3D–Mesh


This is a straightforward extension of the 1D–method presented in the last chapter, but this time things get a bit more complicated. As we have a 3–dimensional grid, we may have a different number of control–points in each direction.

+

Given \(n,m,o\) control–points in \(x,y,z\)–direction, each point on the curve is defined by \[V(u,v,w) = \sum_i \sum_j \sum_k N_{i,d,\tau_i}(u) N_{j,d,\tau_j}(v) N_{k,d,\tau_k}(w) \cdot C_{ijk}.\]
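Evaluated naively, this is a triple sum over the control–grid; a sketch (assuming, for brevity, the same knot–vector in every direction, and reusing `N` from above):

```python
import numpy as np

def V(u, v, w, C, d, tau):
    """Tensor-product B-Spline evaluation; C has shape (n, m, o, 3)."""
    n, m, o, _ = C.shape
    point = np.zeros(3)
    for i in range(n):
        for j in range(m):
            for k in range(o):
                weight = N(i, d, u, tau) * N(j, d, v, tau) * N(k, d, w, tau)
                point += weight * C[i, j, k]
    return point
```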

+

In this case we have three different B–Splines (one for each dimension) and also three variables \(u,v,w\) for each vertex we want to approximate.

+

Given a target vertex \(\vec{p}^*\) and an initial guess \(\vec{p}=V(u,v,w)\) we define the error–function for the gradient–descent as:

+

\[Err(u,v,w,\vec{p}^{*}) = \vec{p}^{*} - V(u,v,w)\]

+

And the partial version for just one direction as

+

\[Err_x(u,v,w,\vec{p}^{*}) = p^{*}_x - \sum_i \sum_j \sum_k N_{i,d,\tau_i}(u) N_{j,d,\tau_j}(v) N_{k,d,\tau_k}(w) \cdot {c_{ijk}}_x \]

+

To solve this we take the partial derivatives, as before:

+

\[
\begin{array}{rl}
\displaystyle \frac{\partial Err_x}{\partial u} = & \displaystyle \frac{\partial}{\partial u} \left( p^{*}_x - \sum_i \sum_j \sum_k N_{i,d,\tau_i}(u) N_{j,d,\tau_j}(v) N_{k,d,\tau_k}(w) \cdot {c_{ijk}}_x \right) \\
= & \displaystyle - \sum_i \sum_j \sum_k N'_{i,d,\tau_i}(u) N_{j,d,\tau_j}(v) N_{k,d,\tau_k}(w) \cdot {c_{ijk}}_x
\end{array}
\]

+

The other partial derivatives follow the same pattern yielding the Jacobian:

+

\[ +J(Err(u,v,w)) = +\left( +\begin{array}{ccc} +\frac{\partial Err_x}{\partial u} & \frac{\partial Err_x}{\partial v} & \frac{\partial Err_x}{\partial w} \\ +\frac{\partial Err_y}{\partial u} & \frac{\partial Err_y}{\partial v} & \frac{\partial Err_y}{\partial w} \\ +\frac{\partial Err_z}{\partial u} & \frac{\partial Err_z}{\partial v} & \frac{\partial Err_z}{\partial w} +\end{array} +\right) +\] \[ +\scriptsize += +\left( +\begin{array}{ccc} +- \displaystyle \sum_{i,j,k} N'_{i}(u) N_{j}(v) N_{k}(w) \cdot {c_{ijk}}_x &- \displaystyle \sum_{i,j,k} N_{i}(u) N'_{j}(v) N_{k}(w) \cdot {c_{ijk}}_x & - \displaystyle \sum_{i,j,k} N_{i}(u) N_{j}(v) N'_{k}(w) \cdot {c_{ijk}}_x \\ +- \displaystyle \sum_{i,j,k} N'_{i}(u) N_{j}(v) N_{k}(w) \cdot {c_{ijk}}_y &- \displaystyle \sum_{i,j,k} N_{i}(u) N'_{j}(v) N_{k}(w) \cdot {c_{ijk}}_y & - \displaystyle \sum_{i,j,k} N_{i}(u) N_{j}(v) N'_{k}(w) \cdot {c_{ijk}}_y \\ +- \displaystyle \sum_{i,j,k} N'_{i}(u) N_{j}(v) N_{k}(w) \cdot {c_{ijk}}_z &- \displaystyle \sum_{i,j,k} N_{i}(u) N'_{j}(v) N_{k}(w) \cdot {c_{ijk}}_z & - \displaystyle \sum_{i,j,k} N_{i}(u) N_{j}(v) N'_{k}(w) \cdot {c_{ijk}}_z +\end{array} +\right) +\]

+

With the Gauss–Newton algorithm we iterate via the formula \[J(Err(u,v,w)) \cdot \Delta \left( \begin{array}{c} u \\ v \\ w \end{array} \right) = -Err(u,v,w)\] and use Cramer’s rule for inverting the small Jacobian and solving this system of linear equations.
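One iteration of this scheme could look as follows; `jacobian` and `err` stand in for the expressions derived above, and a direct 3×3 solve is equivalent to the Cramer's–rule inversion mentioned in the text:

```python
import numpy as np

def gauss_newton_step(uvw, jacobian, err):
    """Solve J * delta = -Err for delta and update (u, v, w)."""
    J = jacobian(*uvw)              # 3x3 Jacobian from above
    E = err(*uvw)                   # 3-component error vector
    delta = np.linalg.solve(J, -E)  # small system: direct solve suffices
    return uvw + delta
```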

+

As there is no strict upper bound on the number of iterations for this algorithm, we just iterate long enough to be within the given \(\epsilon\)–error above. Depending on the shape of the object and the grid, this takes about \(3\) to \(5\) iterations, as we observed in practice.

+

Another issue that we observed in our implementation is that multiple local optima may exist on self–intersecting grids. We solve this problem by defining self–intersecting grids to be invalid and not testing any of them.

+

This is not as big a problem as it sounds at first, as a self–intersection means that control–points further away from a given vertex have more influence over the deformation than control–points closer to it. This contradicts the notion of locality that we want to achieve and deem beneficial for a good behaviour of the evolutionary algorithm.

+
+
+
+

As mentioned in chapter , the way of choosing the representation to map the general problem (mesh–fitting/optimization in our case) into a parameter–space is very important for the quality and runtime of evolutionary algorithms.

+
+
+
+

Because our control–points are arranged in a grid, we can accurately represent each vertex–point inside the grid's volume with proper B–Spline–coefficients between \([0,1[\) and — as a consequence — we have to embed our object into it (or create constant “dummy”–points outside).

+

The great advantage of B–Splines is the local, direct impact of each control–point without having a \(1:1\)–correlation, and a smooth deformation. While the advantages are great, the issues arise from the problem of deciding where to place the control–points and how many to place at all.


One would normally think that the more control–points you add, the better the result will be, but this is not the case for our B–Splines. Given any point \(\vec{p}\), only \(2 \cdot (d-1)\) control–points contribute to the parametrization of that point5. This means that a high resolution can have many control–points that do not contribute to any point on the surface and are thus completely irrelevant to the solution.

+

We illustrate this phenomenon in figure , where the red central points are not relevant for the parametrization of the circle. This leads to artefacts in the deformation–matrix \(\vec{U}\), as the columns corresponding to those control–points are \(0\).

+

This also leads to needlessly increased complexity, as the parameters corresponding to those points will never have any effect, but a naive algorithm will still try to optimize them, yielding numeric artefacts in the best case and non–terminating or ill–defined solutions6 at worst.

+

One can of course neglect those columns and their corresponding control–points, but this raises the question why they were introduced in the first place. We will address this in a special scenario in .

+

For our tests we chose uniform grids of different sizes and added noise to each control–point7 to simulate different starting–conditions.

+
+
+
+

Scenarios for testing evolvability–criteria using FFD


In our experiments we use the same two testing–scenarios that were also used by Richter et al. The first scenario deforms a plane into a shape originally defined by Giannelli et al., where we set up the control–points in a 2–dimensional manner and merely deform in the height–coordinate to get the resulting shape.

+

In the second scenario we increase the degrees of freedom significantly by using a 3–dimensional control–grid to deform a sphere into a face, so each control–point has three degrees of freedom in contrast to the first scenario.

+
+

Test Scenario: 1D Function Approximation

+

In this scenario we use the shape defined by Giannelli et al., which is also used by Richter et al. with the same discretization to \(150 \times 150\) points for a total of \(n = 22\,500\) vertices. The shape is given by the following definition \[\begin{equation}
t(x,y) =
\begin{cases}
0.5 \cos(4\pi \cdot q^{0.5}) + 0.5 & q(x,y) < \frac{1}{16},\\
2(y-x) & 0 < y-x < 0.5,\\
1 & 0.5 < y - x
\end{cases}
\end{equation}\] with \((x,y) \in [0,2] \times [0,1]\) and \(q(x,y)=(x-1.5)^2 + (y-0.5)^2\), which we have visualized in figure .


As the starting–plane we used the same shape, but set all \(z\)–coordinates to \(0\), yielding a flat plane, which is partially already correct.

+

Regarding the fitness–function \(\mathrm{f}(\vec{p})\), we use the very simple approach of summing the squared distances for each corresponding vertex \[\begin{equation}
\mathrm{f}(\vec{p}) = \sum_{i=1}^{n} \|(\vec{Up})_i - t_i\|_2^2 = \|\vec{Up} - \vec{t}\|^2 \rightarrow \min
\end{equation}\] where \(t_i\) are the target–vertices corresponding to the parametrized source–vertices8 with the current deformation–parameters \(\vec{p} = (p_1,\dots, p_m)\). We can use this one–to–one–correspondence because we have exactly the same number of source– and target–vertices due to our setup of just flattening the object.

+

This formula is also the least–squares approximation error, for which we can compute the analytic solution \(\vec{p^{*}} = \vec{U^+}\vec{t}\), yielding the correct gradient in which the evolutionary optimizer should move.
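Both the fitness and the analytic optimum are then direct numpy expressions (a sketch):

```python
import numpy as np

def fitness(U, p, t):
    """f(p) = ||U p - t||^2, the squared distance to the target vertices."""
    return np.linalg.norm(U @ p - t) ** 2

def analytic_optimum(U, t):
    """p* = U^+ t, the least-squares solution used as the correct gradient."""
    return np.linalg.pinv(U) @ t
```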

+
+
+

Test Scenario: 3D Function Approximation

+

As opposed to the 1–dimensional scenario before, the 3–dimensional scenario is much more complex — not only because we have more degrees of freedom on each control–point, but also because the fitness–function we will use has no known analytic solution and multiple local minima.


First of all we introduce the setup: We are given a triangulated model of a sphere consisting of \(10\,807\) vertices that we want to deform into the target–model of a face with a total of \(12\,024\) vertices. Both of these models can be seen in figure .

+

As opposed to the 1D–case, we cannot map the source– and target–vertices in a one–to–one–correspondence, which we especially need for the approximation of the fitting–error. Hence we state that the error of one vertex is the distance to the closest vertex of the respective other model, and sum up the errors from the source and the target.

+

We therefore define the fitness–function to be:

+\[\begin{equation}
\mathrm{f}(\vec{P}) = \frac{1}{n} \underbrace{\sum_{i=1}^n \|\vec{c_T(s_i)} - \vec{s_i}\|_2^2}_{\textrm{source-to-target distance}}
+ \frac{1}{m} \underbrace{\sum_{i=1}^m \|\vec{c_S(t_i)} - \vec{t_i}\|_2^2}_{\textrm{target-to-source distance}}
+ \lambda \cdot \textrm{regularization}(\vec{P})
\label{eq:fit3d}
\end{equation}\]

where \(\vec{c_T(s_i)}\) denotes the target–vertex corresponding to the source–vertex \(\vec{s_i}\), and \(\vec{c_S(t_i)}\) denotes the source–vertex that corresponds to the target–vertex \(\vec{t_i}\). Note that the target–vertices are given and fixed by the target–model of the face we want to deform into, whereas the source–vertices vary depending on the chosen parameters \(\vec{P}\), as those get calculated by the previously introduced formula \(\vec{S} = \vec{UP}\), with \(\vec{S}\) being the \(n \times 3\)–matrix of source–vertices, \(\vec{U}\) the \(n \times m\)–matrix of calculated coefficients — analogous to the 1D case — and finally \(\vec{P}\) being the \(m \times 3\)–matrix of the control–grid defining the whole deformation.
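The closest–vertex correspondences can be computed with any nearest–neighbour structure; the following sketch uses a k–d tree from scipy, with the regularization passed in as a callable — the names are illustrative, not the thesis' actual code:

```python
import numpy as np
from scipy.spatial import cKDTree

def fitness_3d(S, T, lam, regularization):
    """Bidirectional closest-point error plus weighted regularization."""
    d_st, _ = cKDTree(T).query(S)   # source-to-target distances
    d_ts, _ = cKDTree(S).query(T)   # target-to-source distances
    return (np.mean(d_st ** 2) + np.mean(d_ts ** 2)
            + lam * regularization(S))
```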

+

As the regularization–term we add a weighted Laplacian of the deformation that has been used before by Aschenbach et al. on similar models and was shown to lead to a more precise fit. The Laplacian \[\begin{equation}
\mathrm{regularization}(\vec{P}) = \frac{1}{\sum_i A_i} \sum_{i=1}^n A_i \cdot \left( \sum_{\vec{s}_j \in \mathcal{N}(\vec{s}_i)} w_j \cdot \|\Delta \vec{s}_j - \Delta \vec{s}_i\|^2 \right)
\label{eq:reg3d}
\end{equation}\] is determined by the cotangent–weights \(w_j\) of the vertices \(\mathcal{N}(\vec{s}_i)\) connected to \(\vec{s}_i\), and \(A_i\) is the Voronoi–area of the corresponding vertex \(\vec{s_i}\). We leave out the \(\vec{R}_i\)–term from the original paper, as our deformation is merely linear.

+

This regularization–weight gives us a measure of stiffness for the material, which we influence via the \(\lambda\)–coefficient to start out with a stiff material that gets more flexible per iteration. As a side–effect this also limits the effects of overaggressive movement of the control–points at the beginning of the fitting process and thus should limit the generation of the ill–defined grids mentioned in section .

+
+
+
+

Evaluation of Scenarios


To compare our results to the ones given by Richter et al., we also use Spearman's rank correlation coefficient. As opposed to other popular coefficients, like the Pearson correlation coefficient, which measures a linear relationship between variables, Spearman's coefficient assesses how well an arbitrary monotonic function can describe the relationship between two variables, without making any assumptions about the frequency distribution of the variables.

+

As we don’t have any prior knowledge if any of the criteria is linear and we are just interested in a monotonic relation between the criteria and their predictive power, the Spearman’s coefficient seems to fit out scenario best and was also used before by Richter et al.

+

For interpretation of these values we follow the same interpretation used in , based on : The coefficient intervals \(r_S \in [0,0.2[\), \([0.2,0.4[\), \([0.4,0.6[\), \([0.6,0.8[\), and \([0.8,1]\) are classified as very weak, weak, moderate, strong and very strong. We interpret p–values smaller than \(0.01\) as significant and cut off the precision of p–values after four decimal digits (thus often having a p–value of \(0\) given for p–values \(< 10^{-4}\)).

+

As we are looking for anti–correlation (i.e. our criterion should be maximized, indicating a minimal result in — for example — the reconstruction–error) instead of correlation, we flip the sign of the correlation–coefficient for readability and to have the correlation–coefficients in the classification–range given above.
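With scipy this evaluation is a two–liner; a usage sketch where the two arrays are placeholders for a criterion and the corresponding measured outcome:

```python
from scipy.stats import spearmanr

r_s, p = spearmanr(criterion_values, outcome_values)
print(f"-r_S = {-r_s:.2f}, p = {p:.4f}")   # sign flipped as described above
```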

+

For the evolutionary optimization we employ the CMA–ES of the shark3.1 library , as this algorithm was used by as well. We leave the parameters at their sensible defaults as further explained in .

+
+

Procedure: 1D Function Approximation


For our setup we first compute the coefficients of the deformation–matrix and use the formulas for variability and regularity to get our predictions. Afterwards we solve the problem analytically to get the (normalized) correct gradient that we use as a guess for the improvement potential. To further test the improvement potential we also consider a distorted gradient \(\vec{g}_{\mathrm{d}}\): \[
\vec{g}_{\mathrm{d}} = \frac{\mu \vec{g}_{\mathrm{c}} + (1-\mu)\mathbb{1}}{\|\mu \vec{g}_{\mathrm{c}} + (1-\mu) \mathbb{1}\|}
\] where \(\mathbb{1}\) is the vector consisting of \(1\) in every dimension, \(\vec{g}_\mathrm{c} = \vec{p^{*}} - \vec{p}\) is the calculated correct gradient, and \(\mu\) is used to blend between \(\vec{g}_\mathrm{c}\) and \(\mathbb{1}\). As we always start with \(\vec{p} = \vec{0}\), we can shorten the definition of \(\vec{g}_\mathrm{c}\) to \(\vec{g}_\mathrm{c} = \vec{p^{*}}\).
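A sketch of this blending (numpy, illustrative):

```python
import numpy as np

def distorted_gradient(g_c, mu):
    """Blend the correct gradient with the all-ones vector and renormalize."""
    blended = mu * g_c + (1.0 - mu) * np.ones_like(g_c)
    return blended / np.linalg.norm(blended)
```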


We then set up a regular 2–dimensional grid around the object with the desired grid–resolution. To generate a testcase we then move the grid–vertices randomly inside the x–y–plane. As self–intersecting grids are tricky to solve with our implemented Gauss–Newton method (see section ), we avoid the generation of such self–intersecting grids for our testcases.

+

To achieve that, we generate a Gaussian–distributed random number with \(\mu = 0, \sigma=0.25\) and clamp it to the range \([-0.25,0.25]\). We choose such an \(r \in [-0.25,0.25]\) per dimension and move the control–points by that factor towards their respective neighbours9.

+

In other words we set \[\begin{equation*} +p_i = +\begin{cases} + p_i + (p_i - p_{i-1}) \cdot r, & \textrm{if } r \textrm{ negative} \\ + p_i + (p_{i+1} - p_i) \cdot r, & \textrm{if } r \textrm{ positive} +\end{cases} +\end{equation*}\] in each dimension separately.
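A sketch of this jitter for one dimension of the grid; the edge handling from the footnote is omitted for brevity:

```python
import numpy as np

def jitter(p, sigma=0.25):
    """Move each interior point towards a neighbour by a clamped Gaussian r."""
    r = np.clip(np.random.normal(0.0, sigma, len(p)), -0.25, 0.25)
    q = p.copy()
    for i in range(1, len(p) - 1):
        if r[i] < 0:
            q[i] += (p[i] - p[i - 1]) * r[i]   # towards the left neighbour
        else:
            q[i] += (p[i + 1] - p[i]) * r[i]   # towards the right neighbour
    return q
```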

+

An example of such a testcase can be seen for a \(7 \times 4\)–grid in figure .

+
+
+

Results of 1D Function Approximation

+

In the case of our 1D–optimization–problem we have the luxury of knowing the analytic solution to the given problem–set. We use this to experimentally evaluate the quality criteria we introduced before. As an evolutionary optimization is partially a random process, we use the analytic solution as a stopping–criterion. We measure the convergence speed as the number of iterations the evolutionary algorithm needs to get within \(1.05 \times\) of the optimal solution.

+

We used different regular grids, manipulated as explained in Section , with different numbers of control–points. As our grids have to be the product of two integers, we compared a \(5 \times 5\)–grid with \(25\) control–points to \(4 \times 7\) and \(7 \times 4\)–grids with \(28\) control–points each. This was done to measure the impact an improper grid–setup could have and how well this is displayed in the criteria we are examining.

+

Additionally we also measured the effect of increasing the total resolution of the grid by taking a closer look at \(5 \times 5\), \(7 \times 7\) and \(10 \times 10\) grids.

+
+

Variability


Variability should characterize the potential for design–space exploration and is defined in terms of the normalized rank of the deformation–matrix \(\vec{U}\): \(V(\vec{U}) := \frac{\textrm{rank}(\vec{U})}{n}\), whereby \(n\) is the number of vertices. As all our tested matrices had a constant rank (being \(m = x \cdot y\) for an \(x \times y\) grid), we have merely plotted the errors in the box plot in figure .

+

It is also noticeable that, although the \(7 \times 4\) and \(4 \times 7\) grids have a higher variability, they do not perform better than the \(5 \times 5\) grid. Also, the \(7 \times 4\) and \(4 \times 7\) grids differ distinctly from each other, with a mean\(\pm\)sigma of \(233.09 \pm 12.32\) for the former and \(286.32 \pm 22.36\) for the latter, although they have the same number of control–points. This is an indication of the impact a proper or improper grid–setup can have. We do not draw scientific conclusions from these findings, as more research on non–squared grids seems necessary.

+

Leaving the issue of the grid–layout aside, we focused on grids having the same number of control–points in every dimension. For the \(5 \times 5\), \(7 \times 7\) and \(10 \times 10\) grids we found a very strong correlation (\(-r_S = 0.94, p = 0\)) between the variability and the evolutionary error.

+
+
+

Regularity


Regularity should correspond to the convergence speed (measured in iteration–steps of the evolutionary algorithm), and is computed as the inverse condition–number \(\kappa(\vec{U})\) of the deformation–matrix.

+

As can be seen from table , we could only show a weak correlation in the case of the \(5 \times 5\) grid. As we increase the number of control–points, the correlation gets worse until it is completely random within a single dataset. Taking all presented datasets into account, we even get a strong correlation of \(- r_S = -0.72, p = 0\), which is opposed to our expectations.

+

To explain this discrepancy we took a closer look at what caused this high number of iterations. In figure we also plotted the improvement potential against the steps next to the regularity–plot. Our theory is that the very strong correlation (\(-r_S = -0.82, p=0\)) between improvement potential and number of iterations hints that the employed algorithm simply takes longer to converge on a better solution (as seen in figures and ), offsetting any gain the regularity–measurement could achieve.

+
+
+

Improvement Potential


The improvement potential should correlate to the quality of the fitting–result. We plotted the results for the tested grid–sizes \(5 \times 5\), \(7 \times 7\) and \(10 \times 10\) in figure . We tested the \(4 \times 7\) and \(7 \times 4\) grids as well, but omitted them from the plot.

+

Additionally we tested the results for a distorted gradient described in with \(\mu\)–values of \(0.25\), \(0.5\), \(0.75\), and \(1.0\) for the \(5 \times 5\) grid, and with a \(\mu\)–value of \(0.5\) for all other cases.

+

All results show the identical very strong and significant correlation with a Spearman–coefficient of \(- r_S = 1.0\) and p–value of \(0\).

+

These results indicate that \(\|\vec{1} - \vec{U}\vec{U}^{+}\|_F\) is close to \(0\), reducing the impact of any kind of gradient. Nevertheless, the improvement potential seems to be suited to make educated guesses about the quality of a fit, even lacking an exact gradient.

+
+
+
+

Procedure: 3D Function Approximation


As explained in section in detail, we do not know the analytical solution to the global optimum. Additionally we have the problem of finding the right correspondences between the original sphere–model and the target–model, as they consist of \(10\,807\) and \(12\,024\) vertices respectively, so we cannot make a one–to–one–correspondence between them as we did in the one–dimensional case.

+

Initially we set up the correspondences \(\vec{c_T(\dots)}\) and \(\vec{c_S(\dots)}\) to be the respectively closest vertices of the other model. We then calculate the analytic solution given these correspondences via \(\vec{P^{*}} = \vec{U^+}\vec{T}\), and also use this first solution as the guessed gradient for the calculation of the improvement potential, as the optimal solution is not known. We then let the evolutionary algorithm run until it is within \(1.05\) times the error of this solution and afterwards recalculate the correspondences \(\vec{c_T(\dots)}\) and \(\vec{c_S(\dots)}\).


For the next step we then halve the regularization–impact \(\lambda\) (starting at \(1\)) of our fitness–function () and calculate the next incremental solution \(\vec{P^{*}} = \vec{U^+}\vec{T}\) with the updated correspondences (again mapping each vertex to its closest neighbour in the respective other model) to get our next target–error. We repeat this process as long as the target–error keeps decreasing, and use the number of these iterations as a measure of the convergence speed. As the resulting evolutionary error without regularization is in the numeric range of \(\approx 100\), whereas the regularization is numerically \(\approx 7000\), we need at least \(10\) to \(15\) iterations until the regularization–effect wears off.
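Schematically, the procedure looks as follows; every function here is a placeholder for the steps described above, not actual thesis code:

```python
lam = 1.0
target_errors = []
while True:
    C_T, C_S = closest_correspondences(S, T)   # recompute both mappings
    P_star = analytic_optimum(U, C_T)          # incremental P* = U^+ T
    err = evolve_until(1.05 * error_of(P_star, lam))
    if target_errors and err >= target_errors[-1]:
        break                                  # target error stopped decreasing
    target_errors.append(err)
    lam /= 2.0                                 # halve the regularization impact
iterations = len(target_errors)                # our convergence-speed measure
```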

+

The grid we use for our experiments is very coarse due to computational limitations, as we are not interested in a good reconstruction, but in an estimate of whether the mentioned evolvability–criteria are good predictors.

+

In figure we show an example setup of the scene with a \(4\times 4\times 4\)–grid. Identically to the 1–dimensional scenario before, we create a regular grid and move the control–points randomly between their neighbours in the exact same manner as described in section , but in three instead of two dimensions10.


As is clearly visible from figure , the target–model has many vertices in the facial area, at the ears and in the neck–region. Therefore we chose to increase the grid–resolutions for our tests in two different dimensions and see how well the criteria predict a suboptimal placement of these control–points.

+
+
+

Results of 3D Function Approximation

+

In the 3D–approximation we tried to further evaluate the impact of the grid–layout on the overall criteria. As the target–model has many vertices concentrated in the facial area, we start from a \(4 \times 4 \times 4\) grid and only increase the number of control–points in one dimension, yielding resolutions of \(7 \times 4 \times 4\) and \(4 \times 4 \times 7\) respectively. We visualized those two grids in figure .

+

To evaluate the performance of the evolvability–criteria we also tested the more neutral resolutions of \(4 \times 4 \times 4\), \(5 \times 5 \times 5\), and \(6 \times 6 \times 6\) — similar to the 1D–setup.

+

Variability


Similar to the 1D case all our tested matrices had a constant rank (being \(m = x \cdot y \cdot z\) for a \(x \times y \times z\) grid), so we again have merely plotted the errors in the box plot in figure .

+

As expected the \(\mathrm{X} \times 4 \times 4\) grids performed slightly better than their \(4 \times 4 \times \mathrm{X}\) counterparts with a mean\(\pm\)sigma of \(101.25 \pm 7.45\) to \(102.89 \pm 6.74\) for \(\mathrm{X} = 5\) and \(85.37 \pm 7.12\) to \(89.22 \pm 6.49\) for \(\mathrm{X} = 7\).

+

Interestingly, both variants end up closer in terms of fitting error than we anticipated, which shows that the evolutionary algorithm we employed is capable of correcting a purposefully created bad grid. This also confirms that, in our cases, the number of control–points is more important for quality than their placement, which is captured by the variability via the rank of the deformation–matrix.

+

Overall the correlation between variability and fitness–error was significant and showed a very strong correlation in all our tests. The detailed correlation–coefficients are given in table alongside their p–values.

+

As introduced in section and visualized in figure , we know that not all control–points have to necessarily contribute to the parametrization of our 3D–model. Because we are starting from a sphere, some control–points are too far away from the surface to contribute to the deformation at all.

+

One can already see in 2D in figure , that this effect starts with a regular \(9 \times 9\) grid on a perfect circle. To make sure we observe this, we evaluated the variability for 100 randomly moved \(10 \times 10 \times 10\) grids on the sphere we start out with.


As the variability is defined by \(\frac{\mathrm{rank}(\vec{U})}{n}\), we can easily recover the rank of the deformation–matrix \(\vec{U}\). The results are shown in the histogram in figure . Especially in the centre of the sphere and in the corners of our grid we effectively lose control–points for our parametrization.

+

This of course yields a worse error than if those control–points were put to use: one should expect a loss in quality, evident in a higher reconstruction–error, compared to a grid where all control–points contribute. Sadly we could not run an in–depth test on this due to computational limitations.

+

Nevertheless this hints at the notion, that variability is a good measure for the overall quality of a fit.

+
+
+

Regularity


As opposed to the predictions of variability, our tests on regularity gave a mixed result — similar to the 1D–case.

+

In roughly half of the scenarios we have a significant, but weak to moderate correlation between regularity and number of iterations. On the other hand, in the scenarios where we increased the number of control–points, namely \(125\) for the \(5 \times 5 \times 5\) grid and \(216\) for the \(6 \times 6 \times 6\) grid, we found a significant, but weak anti–correlation when taking all three tests into account11, which seems to contradict the findings/trends for the sets with \(64\), \(80\), and \(112\) control–points (first two rows of table ).

+

Taking all results together we only find a very weak, but significant link between regularity and the number of iterations needed for the algorithm to converge.


As can be seen from figure , increasing the number of control–points helps the convergence speed. The regularity–criterion first behaves as we would like it to, but then switches to behave exactly opposite to our expectations, as can be seen in the first three plots. While the number of control–points increases from red to green to blue and the number of iterations decreases, the regularity seems to increase at first, but then decreases again at higher grid–resolutions.

+

This can be an artefact of the definition of regularity, as it is defined by the inverse condition–number of the deformation–matrix \(\vec{U}\), i.e. the fraction \(\frac{\sigma_{\mathrm{min}}}{\sigma_{\mathrm{max}}}\) between the smallest and greatest right singular value.

+

As we observed in the previous section, we cannot guarantee that each control–point has an effect (see figure ), so a small minimal right singular value occurring at higher grid–resolutions seems to be the likely cause.

+

Adding to this, we also noted that in the case of the \(10 \times 10 \times 10\)–grid the regularity was always \(0\), as a non–contributing control–point yields a \(0\)–column in the deformation–matrix, thus letting \(\sigma_\mathrm{min} = 0\). A better definition of regularity (i.e. using the smallest non–zero right singular value) could solve this particular issue, but would not fix the trend we noticed above.

+
+
+

Improvement Potential


Compared to the 1D–scenario, we do not know the optimal solution to the given problem, and for the calculation we only use the initial gradient produced by the initial correlation between both objects. This gradient changes with every iteration and will deviate from our first guess very quickly. This is the reason we do not try to create artificially bad gradients, as we have a broad range of quality in such gradients anyway.


We plotted our findings on the improvement potential in a similar way as we did before with the regularity. In figure one can clearly see the correlation and the spread within each setup and the behaviour when we increase the number of control–points.

+

We also give the Spearman–coefficients along with their p–values in table . Within one scenario we only find a weak to moderate correlation between the improvement potential and the fitting error, but all findings (except for \(7 \times 4 \times 4\) and \(6 \times 6 \times 6\)) are significant.

+

If we take multiple datasets into account the correlation is very strong and significant, which is good, as this functions as a litmus–test, because the quality is naturally tied to the number of control–points.

+

All in all the improvement potential seems to be a good and sensible measure of quality, even given gradients of varying quality.

+

Lastly, a small note on the behaviour of improvement potential and convergence speed, as we used this in the 1D case to argue, why the regularity defied our expectations. As a contrast we wanted to show, that improvement potential cannot serve for good predictions of the convergence speed. In figure we show improvement potential against number of iterations for both scenarios. As one can see, in the 1D scenario we have a strong and significant correlation (with \(-r_S = -0.72\), \(p = 0\)), whereas in the 3D scenario we have the opposite significant and strong effect (with \(-r_S = 0.69\), \(p=0\)), so these correlations clearly seem to be dependent on the scenario and are not suited for generalization.

+
+
+
+

Discussion and outlook


In this thesis we took a look at the different criteria for evolvability as introduced by Richter et al., namely variability, regularity and improvement potential, under different setup–conditions. Where Richter et al. used RBF, we employed FFD to set up a low–complexity parametrization of a more complex vertex–mesh.

+

In our findings we could show in the 1D–scenario that there were statistically significant very strong correlations between variability and fitting error (\(0.94\)) and between improvement potential and fitting error (\(1.0\)), with results comparable to Richter et al. (with \(0.31\) to \(0.88\) for the former and \(0.75\) to \(0.99\) for the latter), whereas we found only weak correlations for regularity and convergence–speed (\(0.28\)), opposed to Richter et al. with \(0.39\) to \(0.91\).12

+

For the 3D–scenario our results show a very strong, significant correlation between variability and fitting error with \(0.89\) to \(0.94\), which is pretty much in line with the findings of Richter et al. (\(0.65\) to \(0.95\)). The correlation between improvement potential and fitting error behaves similarly, with our findings having a significant coefficient of \(0.3\) to \(0.95\), depending on the grid–resolution, compared to the \(0.61\) to \(0.93\) from Richter et al. In the case of the correlation of regularity and convergence speed we found very different (and often not significant) correlations and anti–correlations ranging from \(-0.25\) to \(0.46\), whereas Richter et al. reported correlations between \(0.34\) and \(0.87\).

+

Taking these results into consideration, one can say that variability and improvement potential are very good estimates for the quality of a fit using FFD as a deformation function, while we could not reproduce results as compelling as Richter et al.'s for regularity and convergence speed.

+

One reason for the bad or erratic behaviour of the regularity–criterion could be that in an FFD–setting we have a likelihood of having control–points that contribute to the whole parametrization only in negligible amounts, resulting in very small right singular values of the deformation–matrix \(\vec{U}\) that influence the condition–number and thus the regularity in a significant way. Further research is needed to refine regularity so that these problems are addressed, for example by taking all singular values into account when capturing the notion of regularity.

+

Richter et al. also compared the behaviour of direct and indirect manipulation in , whereas we merely used an indirect FFD–approach. As direct manipulations tend to perform better than indirect manipulations, the usage of direct manipulation could also work better with the criteria we examined. It could also solve the problem of bad singular values for the regularity, as the incorporation of the parametrization of the points on the surface — which is the essential part of a direct manipulation — could cancel out a bad control–grid, as the bad control–points are never or only negligibly used to parametrize those surface–points.

+
+
+
+
1. One more for each recursive step.
2. Warning: in the case of \(d=1\) the recursion–formula yields a \(0\) denominator, but \(N\) is also \(0\). The right solution for this case is a derivative of \(0\).
3. Some examples of this are explained in detail in .
4. We use \(\vec{S}\) in this notation, as we will use this parametrization of a source–mesh to manipulate \(\vec{S}\) into a target–mesh \(\vec{T}\) via \(\vec{P}\).
5. Normally these are \(d-1\) to each side, but at the boundaries border points get used multiple times to meet the number of points required.
6. One example would be when parts of an algorithm depend on the inverse of the minimal right singular value, leading to a division by \(0\).
7. For the special case of the outer layer we only applied noise away from the object, so the object is still confined in the convex hull of the control–points.
8. The parametrization is encoded in \(\vec{U}\) and the initial position of the control–points. See .
9. Note: on the edges this displacement is only applied outwards by flipping the sign of \(r\), if appropriate.
10. Again, we flip the signs for the edges, if necessary, to keep the object inside the convex hull.
11. Displayed as \(Y \times Y \times Y\).
12. We only took statistically significant results into consideration when compiling these numbers. Details are given in the respective chapters.
+ + + + + + + + + + + diff --git a/presentation/presentation.md b/presentation/presentation.md new file mode 100644 index 0000000..1be7093 --- /dev/null +++ b/presentation/presentation.md @@ -0,0 +1,1402 @@ +--- +title: Evaluation of the Performance of Randomized FFD Control Grids +subtitle: Master Thesis +author: Stefan Dresselhaus +affiliation: Graphics & Geometry Group +... + +# Introduction + +Many modern industrial design processes require advanced optimization methods +due to the increased complexity resulting from more and more degrees of freedom +as methods refine and/or other methods are used. Examples for this are physical +domains like aerodynamics (i.e. drag), fluid dynamics (i.e. throughput of liquid) +--- where the complexity increases with the temporal and spatial resolution of +the simulation --- or known hard algorithmic problems in informatics (i.e. +layouting of circuit boards or stacking of 3D--objects). Moreover these are +typically not static environments but requirements shift over time or from case +to case. + +\begin{figure}[hbt] +\centering +\includegraphics[width=\textwidth]{img/Evo_overview.png} +\caption{Example of the use of evolutionary algorithms in automotive design +(from \cite{anrichterEvol}).} +\end{figure} + +Evolutionary algorithms cope especially well with these problem domains while +addressing all the issues at hand\cite{minai2006complex}. One of the main +concerns in these algorithms is the formulation of the problems in terms of a +*genome* and *fitness--function*. While one can typically use an arbitrary +cost--function for the *fitness--functions* (i.e. amount of drag, amount of space, +etc.), the translation of the problem--domain into a simple parametric +representation (the *genome*) can be challenging. + +This translation is often necessary as the target of the optimization may have +too many degrees of freedom for a reasonable computation. In the example of an +aerodynamic simulation of drag onto an object, those object--designs tend to +have a high number of vertices to adhere to various requirements (visual, +practical, physical, etc.). A simpler representation of the same object in only +a few parameters that manipulate the whole in a sensible matter are desirable, +as this often decreases the computation time significantly. + +Additionally one can exploit the fact, that drag in this case is especially +sensitive to non--smooth surfaces, so that a smooth local manipulation of the +surface as a whole is more advantageous than merely random manipulation of the +vertices. + +The quality of such a low--dimensional representation in biological evolution is +strongly tied to the notion of *evolvability*\cite{wagner1996complex}, as the +parametrization of the problem has serious implications on the convergence speed +and the quality of the solution\cite{Rothlauf2006}. +However, there is no consensus on how *evolvability* is defined and the meaning +varies from context to context\cite{richter2015evolvability}. As a consequence +there is need for some criteria we can measure, so that we are able to compare different +representations to learn and improve upon these. + +\begin{figure}[hbt] +\centering +\includegraphics[width=\textwidth]{img/deformations.png} +\caption{Example of RBF--based deformation and FFD targeting the same mesh.} +\end{figure} + +One example of such a general representation of an object is to generate random +points and represent vertices of an object as distances to these points --- for +example via \acf{RBF}. 
If one (or the algorithm) moves such a point, the object gets deformed only locally (due to the \ac{RBF}). As this results in a simple mapping from the parameter--space onto the object, one can try out different representations of the same object and evaluate which criteria may be suited to describe this notion of *evolvability*. This is exactly what Richter et al.\cite{anrichterEvol} have done.

As we transfer the results of Richter et al.\cite{anrichterEvol} from using \acf{RBF} as a representation to manipulate geometric objects to the use of \acf{FFD}, we will use the same definition for *evolvability* the original authors used, namely *regularity*, *variability*, and *improvement potential*. We introduce these terms in detail in Chapter \ref{sec:intro:rvi}. In the original publication the authors could show a correlation between these evolvability--criteria and the quality and convergence speed of such an optimization.

We will replicate the same setup on the same objects but use \acf{FFD} instead of \acf{RBF} to create a local deformation near the control--points, and evaluate whether the evolvability--criteria still work as a predictor for *evolvability* of the representation given the different deformation scheme, as suspected in \cite{anrichterEvol}.

First we introduce the different topics in isolation in Chapter \ref{sec:back}. We take an abstract look at the definition of \ac{FFD} for a one--dimensional line (in \ref{sec:back:ffd}) and discuss why this is a sensible deformation function (in \ref{sec:back:ffdgood}). Then we establish some background knowledge of evolutionary algorithms (in \ref{sec:back:evo}) and why they are useful in our domain (in \ref{sec:back:evogood}), followed by the definition of the different evolvability--criteria established in \cite{anrichterEvol} (in \ref{sec:intro:rvi}).

In Chapter \ref{sec:impl} we take a look at our implementation of \ac{FFD} and the adaptation for 3D--meshes that was used. Next, in Chapter \ref{sec:eval}, we describe the different scenarios we use to evaluate the evolvability--criteria, incorporating all aspects introduced in Chapter \ref{sec:back}. Following that, we evaluate the results in Chapter \ref{sec:res}, with discussion, summary and outlook in Chapter \ref{sec:dis}.

# Background
\label{sec:back}

## What is \acf{FFD}?
\label{sec:back:ffd}

First of all we have to establish how an \ac{FFD} works and why it is a good tool for deforming geometric objects (especially meshes in our case) in the first place. For simplicity we only summarize the 1D--case from \cite{spitzmuller1996bezier} here and go into the extension to the 3D case in chapter \ref{3dffd}.

The main idea of \ac{FFD} is to create a function $s : [0,1[^d \mapsto \mathbb{R}^d$ that spans a certain part of a vector--space and is only linearly parametrized by some special control--points $p_i$ and a constant attribution--function $a_i(u)$, so
$$
s(\vec{u}) = \sum_i a_i(\vec{u}) \vec{p_i}
$$
can be thought of as a representation of the inside of the convex hull generated by the control--points, where each position inside can be accessed by the right $u \in [0,1[^d$.
\begin{figure}[!ht]
\begin{center}
\includegraphics[width=0.7\textwidth]{img/B-Splines.png}
\end{center}
\caption[Example of B--Splines]{Example of a parametrization of a line with corresponding deformation to generate a deformed object}
\label{fig:bspline}
\end{figure}

In the 1--dimensional example in figure \ref{fig:bspline}, the control--points are indicated as red dots and the colour--gradient should hint at the $u$--values ranging from $0$ to $1$.

We now define a \acf{FFD} by the following: Given an arbitrary number of points $p_i$ alongside a line, we map a scalar value $\tau_i \in [0,1[$ to each point with $\tau_i < \tau_{i+1} \forall i$ according to the position of $p_i$ on said line. Additionally, given a degree of the target polynomial $d$ we define the curve $N_{i,d,\tau_i}(u)$ as follows:

\begin{equation} \label{eqn:ffd1d1}
N_{i,0,\tau}(u) = \begin{cases} 1, & u \in [\tau_i, \tau_{i+1}[ \\ 0, & \mbox{otherwise} \end{cases}
\end{equation}
and
\begin{equation} \label{eqn:ffd1d2}
N_{i,d,\tau}(u) = \frac{u-\tau_i}{\tau_{i+d}-\tau_i} N_{i,d-1,\tau}(u) + \frac{\tau_{i+d+1} - u}{\tau_{i+d+1}-\tau_{i+1}} N_{i+1,d-1,\tau}(u)
\end{equation}

If we now multiply every $p_i$ with the corresponding $N_{i,d,\tau_i}(u)$ we get the contribution of each point $p_i$ to the final curve--point parametrized only by $u \in [0,1[$. As can be seen from \eqref{eqn:ffd1d2} we only access points $[p_i..p_{i+d}]$ for any given $i$^[one more for each recursive step.], which gives us, in combination with choosing $p_i$ and $\tau_i$ in order, only a local interference of $d+1$ points.

We can even derive this equation straightforwardly for an arbitrary $N$^[*Warning:* in the case of $d=1$ the recursion--formula yields a $0$ denominator, but $N$ is also $0$. The right solution for this case is a derivative of $0$.]:

$$\frac{\partial}{\partial u} N_{i,d,\tau}(u) = \frac{d}{\tau_{i+d} - \tau_i} N_{i,d-1,\tau}(u) - \frac{d}{\tau_{i+d+1} - \tau_{i+1}} N_{i+1,d-1,\tau}(u)$$

For a B--Spline
$$s(u) = \sum_{i} N_{i,d,\tau_i}(u) p_i$$
these derivations yield $\left(\frac{\partial}{\partial u}\right)^d s(u) = 0$.

Another interesting property of these recursive polynomials is that they are continuous (given $d \ge 1$), as every $p_i$ gets blended in between $\tau_i$ and $\tau_{i+d}$ and out between $\tau_{i+1}$ and $\tau_{i+d+1}$, as can be seen from the two coefficients in every step of the recursion.

This means that all changes are only a local linear combination between the control--points $p_i$ to $p_{i+d+1}$ and consequently this yields the convex--hull--property of B--Splines --- meaning that, no matter how we choose our coefficients, the resulting points all have to lie inside the convex hull of the control--points.

For a given point $s_i$ we can then calculate the contributions $u_{i,j}~:=~N_{j,d,\tau}(u_i)$ of each control--point $p_j$ to get the projection from the control--point--space into the object--space:
$$
s_i = \sum_j u_{i,j} \cdot p_j = \vec{n}_i^{T} \vec{p}
$$
or written for all points at the same time:
$$
\vec{s} = \vec{U} \vec{p}
$$
where $\vec{U}$ is the $n \times m$ transformation--matrix (later on called **deformation matrix**) for $n$ object--space--points and $m$ control--points.
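To make the recursion tangible, the following minimal sketch (our own illustration in Python/numpy, not the implementation used in this thesis; all names are ours) evaluates \eqref{eqn:ffd1d1} and \eqref{eqn:ffd1d2} directly and assembles the deformation matrix $\vec{U}$ for a toy 1D example:

```python
import numpy as np

def basis(i, d, u, tau):
    """Cox-de Boor recursion N_{i,d,tau}(u) as in the two equations above."""
    if d == 0:
        return 1.0 if tau[i] <= u < tau[i + 1] else 0.0
    left = right = 0.0
    if tau[i + d] > tau[i]:              # guard the 0/0 case (N is 0 there)
        left = (u - tau[i]) / (tau[i + d] - tau[i]) * basis(i, d - 1, u, tau)
    if tau[i + d + 1] > tau[i + 1]:
        right = ((tau[i + d + 1] - u) / (tau[i + d + 1] - tau[i + 1])
                 * basis(i + 1, d - 1, u, tau))
    return left + right

def deformation_matrix(us, m, d, tau):
    """Row k holds the contributions u_{k,j} of all m control points to point k."""
    U = np.zeros((len(us), m))
    for k, u in enumerate(us):
        for j in range(m):
            U[k, j] = basis(j, d, u, tau)
    return U

# toy example: cubic B-spline (d = 3) with m = 5 control points on a line
d, m = 3, 5
tau = np.linspace(0, 1, m + d + 1)           # simple uniform knot vector
p = np.array([0.0, 1.0, 0.5, 2.0, 1.5])      # 1D control points
us = np.linspace(tau[d], tau[m], 50, endpoint=False)
U = deformation_matrix(us, m, d, tau)
s = U @ p                                    # deformed points: s = U p
```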
\begin{figure}[ht]
\begin{center}
\includegraphics[width=\textwidth]{img/unity.png}
\end{center}
\caption[B--spline--basis--function as partition of unity]{From \cite[Figure 2.13]{brunet2010contributions}:\newline
\glqq Some interesting properties of the B--splines. On the natural definition domain of the B--spline ($[k_0,k_4]$ on this figure), the B--Spline basis functions sum up to one (partition of unity). In this example, we use B--Splines of degree 2. The horizontal segment below the abscissa axis represents the domain of influence of the B--splines basis function, i.e. the interval on which they are not null. At a given point, there are at most $d+1$ non--zero B--Spline basis functions (compact support).\grqq \newline
Note that Brunet starts his index at $-d$, opposed to our definition, where we start at $0$.}
\label{fig:partition_unity}
\end{figure}

Furthermore, B--Spline--basis--functions form a partition of unity for all but the first and last $d$ control--points\cite{brunet2010contributions}. Therefore we later on use the border--points $d+1$ times, such that $\sum_j u_{i,j} p_j = p_i$ for these points.

The locality of the influence of each control--point and the partition of unity was beautifully pictured by Brunet, which we included here as figure \ref{fig:partition_unity}.

### Why is \ac{FFD} a good deformation function?
\label{sec:back:ffdgood}

The usage of \ac{FFD} as a tool for manipulating meshes follows directly from the properties of the polynomials and the correspondence to the control--points. Having only a few control--points gives the user a nicer high--level interface, as she only needs to move these points and the model follows in an intuitive manner. The deformation is smooth, as the underlying polynomial is smooth as well, and affects as many vertices of the model as needed. Moreover the changes are always local, so the user does not risk any change that she cannot immediately see.

But there are also disadvantages of this approach. The user loses the ability to directly influence vertices, and even seemingly simple tasks such as creating a plateau can be difficult to achieve\cite[chapter~3.2]{hsu1991dmffd}\cite{hsu1992direct}.

These disadvantages led to the formulation of \acf{DM--FFD}\cite[chapter~3.3]{hsu1991dmffd}, in which the user directly interacts with the surface--mesh. All interactions are applied proportionally to the control--points that make up the parametrization of the interaction--point itself, yielding a smooth deformation of the surface *at* the surface without seemingly arbitrarily scattered control--points. Moreover this increases the efficiency of an evolutionary optimization\cite{Menzel2006}, which we will use later on.

\begin{figure}[!ht]
\includegraphics[width=\textwidth]{img/hsu_fig7.png}
\caption{Figure 7 from \cite{hsu1991dmffd}.}
\label{fig:hsu_fig7}
\end{figure}

But this approach also has downsides, as can be seen in figure \ref{fig:hsu_fig7}: the tessellation of the invisible grid has a major impact on the deformation itself.

All in all, \ac{FFD} and \ac{DM--FFD} are still good ways to deform a high--polygon mesh despite the downsides.

## What is evolutionary optimization?
\label{sec:back:evo}

In this thesis we are using an evolutionary optimization strategy to solve the problem of finding the best parameters for our deformation. This approach, however, is very generic and we introduce it here in a broader sense.
\begin{algorithm}
\caption{An outline of evolutionary algorithms}
\label{alg:evo}
\begin{algorithmic}
\STATE t := 0;
\STATE initialize $P(0) := \{\vec{a}_1(0),\dots,\vec{a}_\mu(0)\} \in I^\mu$;
\STATE evaluate $F(0) : \{\Phi(x) | x \in P(0)\}$;
\WHILE{$c(F(t)) \neq$ \TRUE}
 \STATE recombine: $P'(t) := r(P(t))$;
 \STATE mutate: $P''(t) := m(P'(t))$;
 \STATE evaluate $F''(t) : \{\Phi(x) | x \in P''(t)\}$
 \STATE select: $P(t + 1) := s(P''(t) \cup Q,\Phi)$;
 \STATE t := t + 1;
\ENDWHILE
\end{algorithmic}
\end{algorithm}

The general shape of an evolutionary algorithm (adapted from \cite{back1993overview}) is outlined in Algorithm \ref{alg:evo}. Here, $P(t)$ denotes the population of parameters in step $t$ of the algorithm. The population contains $\mu$ individuals $a_i$ from the possible individual--set $I$ that fit the shape of the parameters we are looking for. Typically these are initialized by a random guess or just zero. Further on we need a so--called *fitness--function* $\Phi : I \mapsto M$ that can take each parameter to a measurable space $M$ (usually $M = \mathbb{R}$), along with a convergence--function $c : I \mapsto \mathbb{B}$ that terminates the optimization.

Biologically speaking, the set $I$ corresponds to the set of possible *genotypes* while $M$ represents the possible observable *phenotypes*. *Genotypes* define all initial properties of an individual, but their properties are not directly observable. It is the genes that evolve over time (and thus correspond to the parameters we are tweaking in our algorithms or the genes in nature), but only the *phenotypes* make certain behaviour observable (algorithmically through our *fitness--function*, biologically by the ability to survive and produce offspring). Any individual in our algorithm thus experiences a biologically motivated life cycle: inheriting genes from the parents, modified by occurring mutations, performing according to a fitness--metric, and generating offspring based on this. Therefore each iteration in the while--loop above is also often called a generation.

One should note that there is a subtle difference between the *fitness--function* and a so--called *genotype--phenotype--mapping*. The former directly applies the *genotype--phenotype--mapping* and evaluates the performance of an individual, thus going directly from genes/parameters to reproduction--probability/score. In a concrete example the *genotype* can be an arbitrary vector (the genes), the *phenotype* is then a deformed object, and the performance can be a single measurement like an air--drag--coefficient. The *genotype--phenotype--mapping* would then just be the generation of different objects from that starting--vector, whereas the *fitness--function* would go directly from such a starting--vector to the coefficient that we want to optimize.

The main algorithm just repeats the following steps:

- **Recombine** with a recombination--function $r : I^{\mu} \mapsto I^{\lambda}$ to generate $\lambda$ new individuals based on the characteristics of the $\mu$ parents. This makes sure that the next guess is close to the old guess.
- **Mutate** with a mutation--function $m : I^{\lambda} \mapsto I^{\lambda}$ to introduce new effects that cannot be produced by mere recombination of the parents. Typically this just adds minor defects to individual members of the population, like adding random gaussian noise or amplifying/dampening random parts.
- **Selection** takes a selection--function $s : (I^\lambda \cup I^{\mu + \lambda},\Phi) \mapsto I^\mu$ that selects from the previously generated $I^\lambda$ children and optionally also the parents (denoted by the set $Q$ in the algorithm) using the *fitness--function* $\Phi$. The result of this operation is the next population of $\mu$ individuals.

All these functions can (and mostly do) have a lot of hidden parameters that can be changed over time. A good overview of this is given in \cite{eiben1999parameter}, so we only give a small excerpt here.

For example the mutation can consist of merely a single $\sigma$ determining the strength of the gaussian defects in every parameter --- or giving a different $\sigma$ to every component of those parameters. An even more sophisticated example would be the \glqq 1/5 success rule\grqq \ from \cite{rechenberg1973evolutionsstrategie}.

Also, in the selection--function it may not be wise to only take the best--performing individuals, because it may be that the optimization has to overcome a barrier of bad fitness to achieve a better local optimum.

Recombination also does not have to be mere random choosing of parents, but can also take ancestry, distance of genes or groups of individuals into account. We sketch a toy version of the complete loop below, after discussing the advantages of this approach.

## Advantages of evolutionary algorithms
\label{sec:back:evogood}

The main advantage of evolutionary algorithms is the ability to find optima of general functions just with the help of a given *fitness--function*. Components and techniques for evolutionary algorithms are specifically known to help with different problems arising in the domain of optimization\cite{weise2012evolutionary}. An overview of the typical problems is shown in figure \ref{fig:probhard}.

\begin{figure}[!ht]
\includegraphics[width=\textwidth]{img/weise_fig3.png}
\caption{Fig.~3. taken from \cite{weise2012evolutionary}}
\label{fig:probhard}
\end{figure}

Most of the advantages stem from the fact that a gradient--based procedure usually has only one point of observation from which it evaluates the next steps, whereas an evolutionary strategy starts with a population of guessed solutions. Because an evolutionary strategy can be modified according to the problem--domain (i.e. by the ideas given above) it can also approximate very difficult problems in an efficient manner and even self--tune parameters depending on the ancestry at runtime^[Some examples of this are explained in detail in \cite{eiben1999parameter}].

If an analytic best solution exists and is easily computable (i.e. because the error--function is convex), an evolutionary algorithm is not the right choice. Although both converge to the same solution, the analytic one is usually faster.

But in reality many problems have no analytic solution, because the problem is either not convex or there are so many parameters that an analytic solution (mostly meaning the equivalence to an exhaustive search) is computationally not feasible. Here evolutionary optimization has one more advantage, as one can at least get suboptimal solutions fast, which then refine over time and still converge to a decent solution much faster than an exhaustive search.
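As promised above, the following toy sketch (plain Python/numpy, our own illustration, not the shark--based \ac{CMA--ES} we employ later) implements the loop of Algorithm \ref{alg:evo} as a simple $(\mu+\lambda)$--strategy with averaging recombination and gaussian mutation on a convex toy fitness:

```python
import numpy as np

rng = np.random.default_rng(0)
mu, lam, dim, sigma = 10, 40, 5, 0.3

def fitness(x):                  # toy fitness: squared distance to a target
    return np.sum((x - 1.0) ** 2)

P = [rng.normal(size=dim) for _ in range(mu)]            # initialize P(0)
for t in range(200):                                     # generations
    # recombine: average two random parents, lambda times
    children = [(P[rng.integers(mu)] + P[rng.integers(mu)]) / 2
                for _ in range(lam)]
    # mutate: add gaussian defects
    children = [c + rng.normal(scale=sigma, size=dim) for c in children]
    # select: best mu out of children and parents (Q = P(t))
    P = sorted(children + P, key=fitness)[:mu]
    if fitness(P[0]) < 1e-8:                             # convergence c
        break
print(t, P[0])
```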
## Criteria for the evolvability of linear deformations
\label{sec:intro:rvi}

As we have established in chapter \ref{sec:back:ffd}, we can describe a deformation by the formula
$$
\vec{S} = \vec{U}\vec{P}
$$
where $\vec{S}$ is a $n \times d$ matrix of vertices^[We use $\vec{S}$ in this notation, as we will use this parametrization of a source--mesh to manipulate $\vec{S}$ into a target--mesh $\vec{T}$ via $\vec{P}$], $\vec{U}$ are the (during parametrization) calculated deformation--coefficients and $\vec{P}$ is a $m \times d$ matrix of control--points that we interact with during deformation.

We can also think of the deformation in terms of differences from the original coordinates
$$
\Delta \vec{S} = \vec{U} \cdot \Delta \vec{P}
$$
which is isomorphic to the former due to the linearity of the deformation. One can see in this way that how the deformation behaves lies solely in the entries of $\vec{U}$, which is why the three criteria focus on this matrix.

### Variability

In \cite{anrichterEvol} *variability* is defined as
$$\mathrm{variability}(\vec{U}) := \frac{\mathrm{rank}(\vec{U})}{n},$$
whereby $\vec{U}$ is the $n \times m$ deformation--matrix used to map the $m$ control--points onto the $n$ vertices.

Given $n = m$, an identical number of control--points and vertices, this quotient will be $=1$ if all control--points are independent of each other and the solution is to trivially move every control--point onto a target--point.

In practice the value of $V(\vec{U})$ is typically $\ll 1$, because there are only few control--points for many vertices, so $m \ll n$.

This criterion should correlate to the degrees of freedom the given parametrization has. This can be seen from the fact that $\mathrm{rank}(\vec{U})$ is limited by $\min(m,n)$ and --- as $n$ is constant --- can never exceed $n$.

The rank itself is also interesting, as control--points could theoretically be placed on top of each other or be linearly dependent in another way --- but both cases lower the rank below the number of control--points $m$ and are thus measurable by the *variability*.

### Regularity

*Regularity* is defined\cite{anrichterEvol} as
$$\mathrm{regularity}(\vec{U}) := \frac{1}{\kappa(\vec{U})} = \frac{\sigma_{min}}{\sigma_{max}}$$
where $\sigma_{min}$ and $\sigma_{max}$ are the smallest and greatest right singular value of the deformation--matrix $\vec{U}$.

As we deform the given object only based on the parameters as $\vec{p} \mapsto f(\vec{x} + \vec{U}\vec{p})$, this makes sure that $\|\vec{Up}\| \propto \|\vec{p}\|$ when $\kappa(\vec{U}) \approx 1$. The inversion of $\kappa(\vec{U})$ is only performed to map the criterion--range to $[0..1]$, where $1$ is the optimal value and $0$ is the worst value.

On the one hand this criterion should be characteristic for numeric stability\cite[chapter 2.7]{golub2012matrix} and on the other hand for the convergence speed of evolutionary algorithms\cite{anrichterEvol}, as it is tied to the notion of locality\cite{weise2012evolutionary,thorhauer2014locality}.

### Improvement Potential

In contrast to the general nature of *variability* and *regularity*, which are agnostic of the *fitness--function* at hand, the third criterion should reflect a notion of the potential for optimization, taking a guess into account.

Most of the time some kind of gradient $g$ is available to suggest a direction worth pursuing; either from a previous iteration or by educated guessing.
We use this to guess how much change can be achieved in the given direction.

The definition of the *improvement potential* $P$ is\cite{anrichterEvol}:
$$
\mathrm{potential}(\vec{U}) := 1 - \|(\vec{1} - \vec{UU}^+)\vec{G}\|^2_F
$$
given some approximate $n \times d$ fitness--gradient $\vec{G}$, normalized to $\|\vec{G}\|_F = 1$, whereby $\|\cdot\|_F$ denotes the Frobenius--norm.

# Implementation of \acf{FFD}
\label{sec:impl}

The general formulation of B--Splines has two free parameters $d$ and $\tau$ which must be chosen beforehand.

As we usually work with regular grids in our \ac{FFD}, we define $\tau$ statically as $\tau_i = \nicefrac{i}{n}$, whereby $n$ is the number of control--points in that direction.

$d$ defines the *degree* of the B--Spline--function (the number of times this function is differentiable) and for our purposes we fix $d$ to $3$, but give the formulas for the general case so they can be adapted quite freely.

## Adaptation of \ac{FFD}
\label{sec:ffd:adapt}

As we have established in Chapter \ref{sec:back:ffd}, we can define an \ac{FFD}--displacement as
\begin{equation}
\Delta_x(u) = \sum_i N_{i,d,\tau_i}(u) \Delta_x c_i
\end{equation}

Note that we only sum up the $\Delta$--displacements in the control--points $c_i$ to get the change in position of the point we are interested in.

In this way every deformed vertex is defined by
$$
\textrm{Deform}(v_x) = v_x + \Delta_x(u)
$$
with $u \in [0,1[$ being the variable that connects the high--detailed vertex--mesh to the low--detailed control--grid. To actually calculate the new position of the vertex we first have to calculate the $u$--value for each vertex. This is achieved by finding out the parametrization of $v$ in terms of $c_i$:
$$
v_x \overset{!}{=} \sum_i N_{i,d,\tau_i}(u) c_i
$$
so we can minimize the error between those two:
$$
\underset{u}{\argmin}\,Err(u,v_x) = \underset{u}{\argmin}\,2 \cdot \|v_x - \sum_i N_{i,d,\tau_i}(u) c_i\|^2_2
$$
As this error--term is quadratic, we just differentiate with respect to $u$, yielding
$$
\begin{array}{rl}
\frac{\partial}{\partial u} & v_x - \sum_i N_{i,d,\tau_i}(u) c_i \\
= & - \sum_i \left( \frac{d}{\tau_{i+d} - \tau_i} N_{i,d-1,\tau}(u) - \frac{d}{\tau_{i+d+1} - \tau_{i+1}} N_{i+1,d-1,\tau}(u) \right) c_i
\end{array}
$$
and do a gradient--descent to approximate the value of $u$ up to an $\epsilon$ of $0.0001$.

For this we employ the Gauss--Newton algorithm\cite{gaussNewton}, which converges to the least--squares solution. An exact solution of this problem is impossible most of the time, because we usually have far more vertices than control--points ($\#v~\gg~\#c$).

## Adaptation of \ac{FFD} for a 3D--Mesh
\label{3dffd}

This is a straightforward extension of the 1D--method presented in the last chapter. But this time things get a bit more complicated. As we have a 3--dimensional grid, we may have a different number of control--points in each direction.

Given $n,m,o$ control--points in $x,y,z$--direction, each point on the curve is defined by
$$V(u,v,w) = \sum_i \sum_j \sum_k N_{i,d,\tau_i}(u) N_{j,d,\tau_j}(v) N_{k,d,\tau_k}(w) \cdot C_{ijk}.$$

In this case we have three different B--Splines (one for each dimension) and also 3 variables $u,v,w$ for each vertex we want to approximate.
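Evaluating this trivariate tensor product is mechanical; a sketch of our own (reusing a 1D `basis` helper implementing the Cox--de Boor recursion from \ref{sec:back:ffd}, assumed to be in scope) could look like this:

```python
import numpy as np

def trivariate(u, v, w, C, d, tau_u, tau_v, tau_w):
    """Evaluate V(u,v,w) = sum_ijk N_i(u) N_j(v) N_k(w) * C[i,j,k].

    C has shape (n, m, o, 3); basis() is the 1D Cox-de Boor recursion
    from the earlier sketch."""
    n, m, o, _ = C.shape
    Nu = np.array([basis(i, d, u, tau_u) for i in range(n)])
    Nv = np.array([basis(j, d, v, tau_v) for j in range(m)])
    Nw = np.array([basis(k, d, w, tau_w) for k in range(o)])
    # tensor contraction over all three index sets
    return np.einsum('i,j,k,ijkx->x', Nu, Nv, Nw, C)
```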
Given a target vertex $\vec{p}^*$ and an initial guess $\vec{p}=V(u,v,w)$ we define the error--function for the gradient--descent as:

$$Err(u,v,w,\vec{p}^{*}) = \vec{p}^{*} - V(u,v,w)$$

And the partial version for just one direction as

$$Err_x(u,v,w,\vec{p}^{*}) = p^{*}_x - \sum_i \sum_j \sum_k N_{i,d,\tau_i}(u) N_{j,d,\tau_j}(v) N_{k,d,\tau_k}(w) \cdot {c_{ijk}}_x $$

To solve this we derive partially, like before:

$$
\begin{array}{rl}
 \displaystyle \frac{\partial Err_x}{\partial u} & p^{*}_x - \displaystyle \sum_i \sum_j \sum_k N_{i,d,\tau_i}(u) N_{j,d,\tau_j}(v) N_{k,d,\tau_k}(w) \cdot {c_{ijk}}_x \\
 = & \displaystyle - \sum_i \sum_j \sum_k N'_{i,d,\tau_i}(u) N_{j,d,\tau_j}(v) N_{k,d,\tau_k}(w) \cdot {c_{ijk}}_x
\end{array}
$$

The other partial derivatives follow the same pattern, yielding the Jacobian:

$$
J(Err(u,v,w)) =
\left(
\begin{array}{ccc}
\frac{\partial Err_x}{\partial u} & \frac{\partial Err_x}{\partial v} & \frac{\partial Err_x}{\partial w} \\
\frac{\partial Err_y}{\partial u} & \frac{\partial Err_y}{\partial v} & \frac{\partial Err_y}{\partial w} \\
\frac{\partial Err_z}{\partial u} & \frac{\partial Err_z}{\partial v} & \frac{\partial Err_z}{\partial w}
\end{array}
\right)
$$
$$
\scriptsize
=
\left(
\begin{array}{ccc}
- \displaystyle \sum_{i,j,k} N'_{i}(u) N_{j}(v) N_{k}(w) \cdot {c_{ijk}}_x &- \displaystyle \sum_{i,j,k} N_{i}(u) N'_{j}(v) N_{k}(w) \cdot {c_{ijk}}_x & - \displaystyle \sum_{i,j,k} N_{i}(u) N_{j}(v) N'_{k}(w) \cdot {c_{ijk}}_x \\
- \displaystyle \sum_{i,j,k} N'_{i}(u) N_{j}(v) N_{k}(w) \cdot {c_{ijk}}_y &- \displaystyle \sum_{i,j,k} N_{i}(u) N'_{j}(v) N_{k}(w) \cdot {c_{ijk}}_y & - \displaystyle \sum_{i,j,k} N_{i}(u) N_{j}(v) N'_{k}(w) \cdot {c_{ijk}}_y \\
- \displaystyle \sum_{i,j,k} N'_{i}(u) N_{j}(v) N_{k}(w) \cdot {c_{ijk}}_z &- \displaystyle \sum_{i,j,k} N_{i}(u) N'_{j}(v) N_{k}(w) \cdot {c_{ijk}}_z & - \displaystyle \sum_{i,j,k} N_{i}(u) N_{j}(v) N'_{k}(w) \cdot {c_{ijk}}_z
\end{array}
\right)
$$

With the Gauss--Newton algorithm we iterate via the formula
$$J(Err(u,v,w)) \cdot \Delta \left( \begin{array}{c} u \\ v \\ w \end{array} \right) = -Err(u,v,w)$$
and use Cramer's rule to invert the small Jacobian and solve this system of linear equations.

As there is no strict upper bound on the number of iterations for this algorithm, we just iterate long enough to be within the given $\epsilon$--error above. This takes --- depending on the shape of the object and the grid --- about $3$ to $5$ iterations in practice.

Another issue that we observed in our implementation is that multiple local optima may exist on self--intersecting grids. We solve this problem by defining self--intersecting grids to be *invalid* and do not test any of them.

This is not as big a problem as it sounds at first, as self--intersections mean that control--points that are further away from a given vertex have more influence over the deformation than control--points closer to it. This contradicts the notion of locality that we want to achieve and deem beneficial for a good behaviour of the evolutionary algorithm.

## Deformation Grid
\label{sec:impl:grid}

As mentioned in chapter \ref{sec:back:evo}, the way of choosing the representation to map the general problem (mesh--fitting/optimization in our case) into a parameter--space is very important for the quality and runtime of evolutionary algorithms\cite{Rothlauf2006}.
Because our control--points are arranged in a grid, we can accurately represent each vertex--point inside the grid's volume with proper B--Spline--coefficients between $[0,1[$ and --- as a consequence --- we have to embed our object into it (or create constant "dummy"--points outside).

The great advantage of B--Splines is the local, direct impact of each control--point without having a $1:1$--correlation, and a smooth deformation. While the advantages are great, the issues arise from the problem of deciding where to place the control--points and how many to place at all.

\begin{figure}[!tbh]
\centering
\includegraphics{img/enoughCP.png}
\caption[Example of a high resolution control--grid]{A high resolution ($10 \times 10$) of control--points over a circle. Yellow/green points contribute to the parametrization, red points don't.\newline
An example--point (blue) is solely determined by the position of the green control--points.}
\label{fig:enoughCP}
\end{figure}

One would normally think that the more control--points you add, the better the result will be, but this is not the case for our B--Splines. Given any point $\vec{p}$, only $2 \cdot (d-1)$ control--points contribute to the parametrization of that point^[Normally these are $d-1$ to each side, but at the boundaries border points get used multiple times to meet the number of points required]. This means that a high resolution can have many control--points that are not contributing to any point on the surface and are thus completely irrelevant to the solution.

We illustrate this phenomenon in figure \ref{fig:enoughCP}, where the red central points are not relevant for the parametrization of the circle. This leads to artefacts in the deformation--matrix $\vec{U}$, as the columns corresponding to those control--points are $0$.

This also leads to needlessly increased complexity, as the parameters corresponding to those points will never have any effect, but a naive algorithm will still try to optimize them, yielding numeric artefacts in the best case and non--terminating or ill--defined solutions^[One example would be when parts of an algorithm depend on the inverse of the minimal right singular value, leading to a division by $0$.] at worst.

One can of course neglect those columns and their corresponding control--points, but this raises the question why they were introduced in the first place. We will address this in a special scenario in \ref{sec:res:3d:var}.

For our tests we chose different uniformly sized grids and added noise onto each control--point^[For the special case of the outer layer we only applied noise away from the object, so the object is still confined in the convex hull of the control--points.] to simulate different starting--conditions.

# Scenarios for testing evolvability--criteria using \ac{FFD}
\label{sec:eval}

In our experiments we use the same two testing--scenarios that were also used by Richter et al.\cite{anrichterEvol} The first scenario deforms a plane into a shape originally defined by Giannelli et al.\cite{giannelli2012thb}, where we set up control--points in a 2--dimensional manner and merely deform in the height--coordinate to get the resulting shape.

In the second scenario we increase the degrees of freedom significantly by using a 3--dimensional control--grid to deform a sphere into a face, so each control--point has three degrees of freedom in contrast to the first scenario.
## Test Scenario: 1D Function Approximation

In this scenario we used the shape defined by Giannelli et al.\cite{giannelli2012thb}, which is also used by Richter et al.\cite{anrichterEvol} with the same discretization to $150 \times 150$ points for a total of $n = 22\,500$ vertices. The shape is given by the following definition:
\begin{equation}
t(x,y) =
\begin{cases}
0.5 \cos(4\pi \cdot q^{0.5}) + 0.5 & q(x,y) < \frac{1}{16},\\
2(y-x) & 0 < y-x < 0.5,\\
1 & 0.5 < y - x
\end{cases}
\end{equation}
with $(x,y) \in [0,2] \times [0,1]$ and $q(x,y)=(x-1.5)^2 + (y-0.5)^2$, which we have visualized in figure \ref{fig:1dtarget}.

\begin{figure}[ht]
\begin{center}
\includegraphics[width=0.7\textwidth]{img/1dtarget.png}
\end{center}
\caption[The 1D--target--shape]{The target--shape for our 1--dimensional optimization--scenario including a wireframe--overlay of the vertices.}
\label{fig:1dtarget}
\end{figure}

As the starting--plane we used the same shape, but set all $z$--coordinates to $0$, yielding a flat plane, which is partially already correct.

Regarding the *fitness--function* $\mathrm{f}(\vec{p})$, we use the very simple approach of calculating the squared distances for each corresponding vertex
\begin{equation}
\mathrm{f}(\vec{p}) = \sum_{i=1}^{n} \|(\vec{Up})_i - t_i\|_2^2 = \|\vec{Up} - \vec{t}\|^2 \rightarrow \min
\end{equation}
where $t_i$ are the respective target--vertices to the parametrized source--vertices^[The parametrization is encoded in $\vec{U}$ and the initial position of the control--points. See \ref{sec:ffd:adapt}] with the current deformation--parameters $\vec{p} = (p_1,\dots, p_m)$. We can use this one--to--one--correspondence because we have exactly the same number of source-- and target--vertices due to our setup of just flattening the object.

This formula is also the least--squares approximation error, for which we can compute the analytic solution $\vec{p^{*}} = \vec{U^+}\vec{t}$, yielding the correct gradient in which the evolutionary optimizer should move.

## Test Scenario: 3D Function Approximation
\label{sec:test:3dfa}

In contrast to the 1--dimensional scenario before, the 3--dimensional scenario is much more complex --- not only because we have more degrees of freedom on each control--point, but also because the *fitness--function* we will use has no known analytic solution and multiple local minima.

\begin{figure}[ht]
\begin{center}
\includegraphics[width=0.9\textwidth]{img/3dtarget.png}
\end{center}
\caption[3D source and target meshes]{\newline
Left: The sphere we start from with 10\,807 vertices\newline
Right: The face we want to deform the sphere into with 12\,024 vertices.}
\label{fig:3dtarget}
\end{figure}

First of all we introduce the setup: We are given a triangulated model of a sphere consisting of $10\,807$ vertices that we want to deform into the target--model of a face with a total of $12\,024$ vertices. Both of these models can be seen in figure \ref{fig:3dtarget}.

As opposed to the 1D--case we cannot map the source-- and target--vertices in a one--to--one--correspondence, which we especially need for the approximation of the fitting--error. Hence we state that the error of one vertex is the distance to the closest vertex of the respective other model, and sum up the error from the source and target.
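Such closest--vertex correspondences can be computed, for example, with a k--d--tree in both directions. A minimal sketch of our own (assuming numpy/scipy; not the thesis' actual implementation) before we formalize the error as a *fitness--function* below:

```python
import numpy as np
from scipy.spatial import cKDTree

def two_sided_distance(source, target):
    """Mean squared distance source->target plus target->source.

    source: (n, 3) array of source vertices, target: (m, 3) array."""
    d_st, _ = cKDTree(target).query(source)  # closest target vertex per source vertex
    d_ts, _ = cKDTree(source).query(target)  # closest source vertex per target vertex
    return np.mean(d_st ** 2) + np.mean(d_ts ** 2)
```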
We therefore define the *fitness--function* to be:

\begin{equation}
\mathrm{f}(\vec{P}) = \frac{1}{n} \underbrace{\sum_{i=1}^n \|\vec{c_T(s_i)} - \vec{s_i}\|_2^2}_{\textrm{source--to--target--distance}}
+ \frac{1}{m} \underbrace{\sum_{i=1}^m \|\vec{c_S(t_i)} - \vec{t_i}\|_2^2}_{\textrm{target--to--source--distance}}
+ \lambda \cdot \textrm{regularization}(\vec{P})
\label{eq:fit3d}
\end{equation}

where $\vec{c_T(s_i)}$ denotes the target--vertex that corresponds to the source--vertex $\vec{s_i}$ and $\vec{c_S(t_i)}$ denotes the source--vertex that corresponds to the target--vertex $\vec{t_i}$. Note that the target--vertices are given and fixed by the target--model of the face we want to deform into, whereas the source--vertices vary depending on the chosen parameters $\vec{P}$, as those get calculated by the previously introduced formula $\vec{S} = \vec{UP}$ with $\vec{S}$ being the $n \times 3$--matrix of source--vertices, $\vec{U}$ the $n \times m$--matrix of calculated coefficients for the \ac{FFD} --- analogous to the 1D case --- and finally $\vec{P}$ being the $m \times 3$--matrix of the control--grid defining the whole deformation.

As regularization--term we add a weighted Laplacian of the deformation that has been used before by Aschenbach et al.\cite[Section 3.2]{aschenbach2015} on similar models and was shown to lead to a more precise fit. The Laplacian
\begin{equation}
\mathrm{regularization}(\vec{P}) = \frac{1}{\sum_i A_i} \sum_{i=1}^n A_i \cdot \left( \sum_{\vec{s}_j \in \mathcal{N}(\vec{s}_i)} w_j \cdot \|\Delta \vec{s}_j - \Delta \vec{s}_i\|^2 \right)
\label{eq:reg3d}
\end{equation}
is determined by the cotangent--weighted displacements, with weights $w_j$, of the vertices $\mathcal{N}(\vec{s}_i)$ connected to $\vec{s}_i$, and $A_i$ is the Voronoi--area of the corresponding vertex $\vec{s_i}$. We leave out the $\vec{R}_i$--term from the original paper, as our deformation is merely linear.

This regularization--weight gives us a measure of stiffness for the material, which we influence via the $\lambda$--coefficient to start out with a stiff material that gets more flexible per iteration. As a side--effect this also limits the effects of overaggressive movement of the control--points in the beginning of the fitting process and thus should limit the generation of ill--defined grids mentioned in section \ref{sec:impl:grid}.

# Evaluation of Scenarios
\label{sec:res}

To compare our results to the ones given by Richter et al.\cite{anrichterEvol}, we also use Spearman's rank correlation coefficient. As opposed to other popular coefficients like the Pearson correlation coefficient, which measures a linear relationship between variables, Spearman's coefficient assesses \glqq how well an arbitrary monotonic function can describe the relationship between two variables, without making any assumptions about the frequency distribution of the variables\grqq\cite{hauke2011comparison}.
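For illustration, this coefficient (and its p--value) can be computed with scipy; a sketch on made--up data, with the sign--flip convention we adopt below:

```python
import numpy as np
from scipy.stats import spearmanr

# made-up example data: one evolvability criterion vs. fitting error per run
criterion = np.array([0.91, 0.85, 0.60, 0.72, 0.40, 0.55])
error     = np.array([ 12.,  15.,  48.,  30.,  80.,  51.])

r_s, p = spearmanr(criterion, error)
print(-r_s, p)   # sign flipped, as a high criterion should mean a low error
```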
As we don't have any prior knowledge whether any of the criteria is linear, and we are just interested in a monotonic relation between the criteria and their predictive power, Spearman's coefficient seems to fit our scenario best and was also used before by Richter et al.\cite{anrichterEvol}

For the interpretation of these values we follow the same interpretation used in \cite{anrichterEvol}, based on \cite{weir2015spearman}: The coefficient intervals $r_S \in [0,0.2[$, $[0.2,0.4[$, $[0.4,0.6[$, $[0.6,0.8[$, and $[0.8,1]$ are classified as *very weak*, *weak*, *moderate*, *strong* and *very strong*. We interpret p--values smaller than $0.01$ as *significant* and cut off the precision of p--values after four decimal digits (thus often having a p--value of $0$ given for p--values $< 10^{-4}$).

As we are looking for anti--correlation (i.e. our criterion should be maximized, indicating a minimal result in --- for example --- the reconstruction--error) instead of correlation, we flip the sign of the correlation--coefficient for readability and to have the correlation--coefficients lie in the classification--range given above.

For the evolutionary optimization we employ the \acf{CMA--ES} of the shark3.1 library\cite{shark08}, as this algorithm was used by \cite{anrichterEvol} as well. We leave the parameters at their sensible defaults as further explained in \cite[Appendix~A: Table~1]{hansen2016cma}.

## Procedure: 1D Function Approximation
\label{sec:proc:1d}

For our setup we first compute the coefficients of the deformation--matrix and use the formulas for *variability* and *regularity* to get our predictions. Afterwards we solve the problem analytically to get the (normalized) correct gradient that we use as a guess for the *improvement potential*. To further test the *improvement potential* we also consider a distorted gradient $\vec{g}_{\mathrm{d}}$:
$$
\vec{g}_{\mathrm{d}} = \frac{\mu \vec{g}_{\mathrm{c}} + (1-\mu)\mathbb{1}}{\|\mu \vec{g}_{\mathrm{c}} + (1-\mu) \mathbb{1}\|}
$$
where $\mathbb{1}$ is the vector consisting of $1$ in every dimension, $\vec{g}_\mathrm{c} = \vec{p^{*}} - \vec{p}$ is the calculated correct gradient, and $\mu$ is used to blend between $\vec{g}_\mathrm{c}$ and $\mathbb{1}$. As we always start with $\vec{p} = \mathbb{0}$, we can shorten the definition of $\vec{g}_\mathrm{c}$ to $\vec{g}_\mathrm{c} = \vec{p^{*}}$.

\begin{figure}[ht]
\begin{center}
\includegraphics[width=\textwidth]{img/example1d_grid.png}
\end{center}
\caption[Example of a 1D--grid]{\newline Left: A regular $7 \times 4$--grid\newline Right: The same grid after a random distortion to generate a testcase.}
\label{fig:example1d_grid}
\end{figure}

We then set up a regular 2--dimensional grid around the object with the desired grid--resolutions. To generate a testcase we then move the grid--vertices randomly inside the x--y--plane. As self--intersecting grids get tricky to solve with our implemented Newton's method (see section \ref{3dffd}), we avoid the generation of such self--intersecting grids for our testcases.

To achieve that, we generated a gaussian--distributed number with $\mu = 0, \sigma=0.25$ and clamped it to the range $[-0.25,0.25]$. We chose such an $r \in [-0.25,0.25]$ per dimension and moved the control--points by that factor towards their respective neighbours^[Note: On the edges this displacement is only applied outwards by flipping the sign of $r$, if appropriate.].
In other words we set
\begin{equation*}
p_i =
\begin{cases}
 p_i + (p_i - p_{i-1}) \cdot r, & \textrm{if } r \textrm{ negative} \\
 p_i + (p_{i+1} - p_i) \cdot r, & \textrm{if } r \textrm{ positive}
\end{cases}
\end{equation*}
in each dimension separately.

An example of such a testcase can be seen for a $7 \times 4$--grid in figure \ref{fig:example1d_grid}.

## Results of 1D Function Approximation

In the case of our 1D--optimization--problem, we have the luxury of knowing the analytical solution to the given problem--set. We use this to experimentally evaluate the quality criteria we introduced before. As an evolutionary optimization is partially a random process, we use the analytical solution as a stopping--criterion. We measure the convergence speed as the number of iterations the evolutionary algorithm needs to get within $1.05 \times$ of the optimal solution.

We used different regular grids that we manipulated as explained in Section \ref{sec:proc:1d} with a different number of control--points. As our grids have to be the product of two integers, we compared a $5 \times 5$--grid with $25$ control--points to a $4 \times 7$ and $7 \times 4$--grid with $28$ control--points. This was done to measure the impact an \glqq improper\grqq \ setup could have and how well this is displayed in the criteria we are examining.

Additionally we also measured the effect of increasing the total resolution of the grid by taking a closer look at $5 \times 5$, $7 \times 7$ and $10 \times 10$ grids.

### Variability

\begin{figure}[tbh]
\centering
\includegraphics[width=0.7\textwidth]{img/evolution1d/variability_boxplot.png}
\caption[1D Fitting Errors for various grids]{The squared error for the various grids we examined.\newline
Note that $7 \times 4$ and $4 \times 7$ have the same number of control--points.}
\label{fig:1dvar}
\end{figure}

*Variability* should characterize the potential for design--space exploration and is defined in terms of the normalized rank of the deformation matrix $\vec{U}$: $V(\vec{U}) := \frac{\textrm{rank}(\vec{U})}{n}$, whereby $n$ is the number of vertices. As all our tested matrices had a constant rank (being $m = x \cdot y$ for an $x \times y$ grid), we have merely plotted the errors in the box plot in figure \ref{fig:1dvar}.

It is also noticeable that, although the $7 \times 4$ and $4 \times 7$ grids have a higher *variability*, they do not perform better than the $5 \times 5$ grid. Also the $7 \times 4$ and $4 \times 7$ grids differ distinctly from each other, with a mean$\pm$sigma of $233.09 \pm 12.32$ for the former and $286.32 \pm 22.36$ for the latter, although they have the same number of control--points. This is an indication of the impact a proper or improper grid--setup can have. We do not draw scientific conclusions from these findings, as more research on non--square grids seems necessary.

Leaving the issue of the grid--layout aside, we focused on grids having the same number of control--points in every dimension. For the $5 \times 5$, $7 \times 7$ and $10 \times 10$ grids we found a *very strong* correlation ($-r_S = 0.94, p = 0$) between the *variability* and the evolutionary error.
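For reference, computing this criterion from a given deformation matrix is a one--liner (a sketch in numpy, not the thesis code):

```python
import numpy as np

def variability(U):
    """variability(U) = rank(U) / n for an (n x m) deformation matrix."""
    return np.linalg.matrix_rank(U) / U.shape[0]
```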
### Regularity

\begin{figure}[tbh]
\centering
\includegraphics[width=\textwidth]{img/evolution1d/55_to_1010_steps.png}
\caption[Improvement potential and regularity against iterations]{\newline
Left: *Improvement potential* against number of iterations until convergence\newline
Right: *Regularity* against number of iterations until convergence\newline
Coloured by their grid--resolution, both with a linear fit over the whole dataset.}
\label{fig:1dreg}
\end{figure}

\begin{table}[b]
\centering
\begin{tabular}{c|c|c|c|c}
$5 \times 5$ & $7 \times 4$ & $4 \times 7$ & $7 \times 7$ & $10 \times 10$\\
\hline
$0.28$ ($0.0045$) & \textcolor{red}{$0.21$} ($0.0396$) & \textcolor{red}{$0.1$} ($0.3019$) & \textcolor{red}{$0.01$} ($0.9216$) & \textcolor{red}{$0.01$} ($0.9185$)
\end{tabular}
\caption[Correlation 1D *regularity* against iterations]{Negated Spearman's correlation (and p--values) between *regularity* and number of iterations for the 1D function approximation problem.
\newline Note: Non--significant results are marked in \textcolor{red}{red}.}
\label{tab:1dreg}
\end{table}

*Regularity* should correspond to the convergence speed (measured in iteration--steps of the evolutionary algorithm), and is computed as the inverse condition number $\kappa(\vec{U})$ of the deformation--matrix.

As can be seen from table \ref{tab:1dreg}, we could only show a *weak* correlation in the case of a $5 \times 5$ grid. As we increase the number of control--points, the correlation gets worse until it is completely random in a single dataset. Taking all presented datasets into account we even get a *strong* correlation of $-r_S = -0.72, p = 0$, which is opposed to our expectations.

To explain this discrepancy we took a closer look at what caused these high numbers of iterations. In figure \ref{fig:1dreg} we also plotted the *improvement potential* against the steps next to the *regularity*--plot. Our theory is that the *very strong* correlation ($-r_S = -0.82, p=0$) between *improvement potential* and number of iterations hints that the employed algorithm simply takes longer to converge on a better solution (as seen in figures \ref{fig:1dvar} and \ref{fig:1dimp}), offsetting any gain the regularity--measurement could achieve.

### Improvement Potential

\begin{figure}[ht]
\centering
\includegraphics[width=0.8\textwidth]{img/evolution1d/55_to_1010_improvement-vs-evo-error.png}
\caption[Correlation 1D Improvement vs. Error]{*Improvement potential* plotted against the error yielded by the evolutionary optimization for different grid--resolutions}
\label{fig:1dimp}
\end{figure}

The *improvement potential* should correlate to the quality of the fitting--result. We plotted the results for the tested grid--sizes $5 \times 5$, $7 \times 7$ and $10 \times 10$ in figure \ref{fig:1dimp}. We tested the $4 \times 7$ and $7 \times 4$ grids as well, but omitted them from the plot.

Additionally we tested the results for a distorted gradient as described in \ref{sec:proc:1d} with a $\mu$--value of $0.25$, $0.5$, $0.75$, and $1.0$ for the $5 \times 5$ grid and with a $\mu$--value of $0.5$ for all other cases.

All results show the identical *very strong* and *significant* correlation with a Spearman--coefficient of $-r_S = 1.0$ and a p--value of $0$.

These results indicate that $\|\mathbb{1} - \vec{U}\vec{U}^{+}\|_F$ is close to $0$, reducing the impact of any kind of gradient.
Nevertheless, the improvement potential seems to be suited to make educated guesses about the quality of a fit, even lacking an exact gradient.

## Procedure: 3D Function Approximation
\label{sec:proc:3dfa}

As explained in section \ref{sec:test:3dfa} in detail, we do not know the analytical solution to the global optimum. Additionally we have the problem of finding the right correspondences between the original sphere--model and the target--model, as they consist of $10\,807$ and $12\,024$ vertices respectively, so we cannot make a one--to--one--correspondence between them as we did in the one--dimensional case.

Initially we set up the correspondences $\vec{c_T(\dots)}$ and $\vec{c_S(\dots)}$ to be the respectively closest vertices of the other model. We then calculate the analytical solution given these correspondences via $\vec{P^{*}} = \vec{U^+}\vec{T}$, and also use this first solution as the guessed gradient for the calculation of the *improvement potential*, as the optimal solution is not known. We then let the evolutionary algorithm run until it is within $1.05$ times the error of this solution and afterwards recalculate the correspondences $\vec{c_T(\dots)}$ and $\vec{c_S(\dots)}$.

\begin{figure}[ht]
\begin{center}
\includegraphics[width=\textwidth]{img/example3d_grid.png}
\end{center}
\caption[Example of a 3D--grid]{\newline Left: The 3D--setup with a $4\times 4\times 4$--grid.\newline Right: The same grid after adding noise to the control--points.}
\label{fig:setup3d}
\end{figure}

For the next step we then halve the regularization--impact $\lambda$ (starting at $1$) of our *fitness--function* (\ref{eq:fit3d}) and calculate the next incremental solution $\vec{P^{*}} = \vec{U^+}\vec{T}$ with the updated correspondences (again, mapping each vertex to its closest neighbour in the respective other model) to get our next target--error. We repeat this process as long as the target--error keeps decreasing and use the number of these iterations as a measure of the convergence speed. As the resulting evolutionary error without regularization is in the numeric range of $\approx 100$, whereas the regularization is numerically $\approx 7000$, we need at least $10$ to $15$ iterations until the regularization--effect wears off.

The grid we use for our experiments is very coarse due to computational limitations. We are not interested in a good reconstruction, but in an estimate of whether the mentioned evolvability--criteria are good predictors.

In figure \ref{fig:setup3d} we show an example setup of the scene with a $4\times 4\times 4$--grid. Identical to the 1--dimensional scenario before, we create a regular grid and move the control--points in the exact same random manner between their neighbours as described in section \ref{sec:proc:1d}, but in three instead of two dimensions^[Again, we flip the signs for the edges, if necessary, to keep the object inside the convex hull.].

\begin{figure}[!htb]
\includegraphics[width=\textwidth]{img/3d_grid_resolution.png}
\caption[Different resolution of 3D grids]{\newline
Left: A $7 \times 4 \times 4$ grid suited to better deform into facial features.\newline
Right: A $4 \times 4 \times 7$ grid that we expect to perform worse.}
\label{fig:3dgridres}
\end{figure}

As is clearly visible from figure \ref{fig:3dgridres}, the target--model has many vertices in the facial area, at the ears and in the neck--region.
Therefore we +chose to increase the grid--resolutions for our tests in two different dimensions +and see how well the criteria predict a suboptimal placement of these +control--points. + +## Results of 3D Function Approximation + +In the 3D--Approximation we tried to evaluate further on the impact of the +grid--layout to the overall criteria. As the target--model has many vertices in +concentrated in the facial area we start from a $4 \times 4 \times 4$ grid and +only increase the number of control--points in one dimension, yielding a +resolution of $7 \times 4 \times 4$ and $4 \times 4 \times 7$ respectively. We +visualized those two grids in figure \ref{fig:3dgridres}. + +To evaluate the performance of the evolvability--criteria we also tested a more +neutral resolution of $4 \times 4 \times 4$, $5 \times 5 \times 5$, and $6 \times 6 \times 6$ --- +similar to the 1D--setup. + +\begin{figure}[ht] +\centering +\includegraphics[width=0.7\textwidth]{img/evolution3d/variability_boxplot.png} +\caption[3D Fitting Errors for various grids]{The fitting error for the various +grids we examined.\newline +Note that the number of control--points is a product of the resolution, so $X +\times 4 \times 4$ and $4 \times 4 \times X$ have the same number of +control--points.} +\label{fig:3dvar} +\end{figure} + +### Variability +\label{sec:res:3d:var} + +\begin{table}[tbh] +\centering +\begin{tabular}{c|c|c|c} +$4 \times 4 \times \mathrm{X}$ & $\mathrm{X} \times 4 \times 4$ & $\mathrm{Y} \times \mathrm{Y} \times \mathrm{Y}$ & all \\ +\hline +0.89 (0) & 0.9 (0) & 0.91 (0) & 0.94 (0) +\end{tabular} +\caption[Correlation between *variability* and fitting error for 3D]{Correlation +between *variability* and fitting error for the 3D fitting scenario.\newline +Displayed are the negated Spearman coefficients with the corresponding p--values +in brackets for three cases of increasing *variability* ($\mathrm{X} \in [4,5,7], +\mathrm{Y} \in [4,5,6]$). +\newline Note: Not significant results are marked in \textcolor{red}{red}.} +\label{tab:3dvar} +\end{table} + +Similar to the 1D case all our tested matrices had a constant rank (being +$m = x \cdot y \cdot z$ for a $x \times y \times z$ grid), so we again have merely plotted +the errors in the box plot in figure \ref{fig:3dvar}. + +As expected the $\mathrm{X} \times 4 \times 4$ grids performed +slightly better than their $4 \times 4 \times \mathrm{X}$ counterparts with a +mean$\pm$sigma of $101.25 \pm 7.45$ to $102.89 \pm 6.74$ for $\mathrm{X} = 5$ and +$85.37 \pm 7.12$ to $89.22 \pm 6.49$ for $\mathrm{X} = 7$. + +Interestingly both variants end up closer in terms of fitting error than we +anticipated, which shows that the evolutionary algorithm we employed is capable +of correcting a purposefully created \glqq bad\grqq \ grid. Also this confirms, +that in our cases the number of control--points is more important for quality +than their placement, which is captured by the *variability* via the rank of the +deformation--matrix. + +Overall the correlation between *variability* and fitness--error were +*significant* and showed a *very strong* correlation in all our tests. +The detailed correlation--coefficients are given in table \ref{tab:3dvar} +alongside their p--values. + +As introduces in section \ref{sec:impl:grid} and visualized in figure +\ref{fig:enoughCP}, we know, that not all control--points have to necessarily +contribute to the parametrization of our 3D--model. 
One can already see in 2D in figure \ref{fig:enoughCP} that this effect starts
with a regular $9 \times 9$ grid on a perfect circle. To make sure we observe
this, we evaluated the *variability* for 100 randomly moved
$10 \times 10 \times 10$ grids on the sphere we start out with.

\begin{figure}[hbt]
\centering
\includegraphics[width=0.8\textwidth]{img/evolution3d/variability2_boxplot.png}
\caption[Histogram of ranks of high--resolution deformation--matrices]{
Histogram of ranks of various $10 \times 10 \times 10$ grids with $1000$
control--points each, showing how many control--points are actually used in
the calculations.
}
\label{fig:histrank3d}
\end{figure}

As the *variability* is defined by $\frac{\mathrm{rank}(\vec{U})}{n}$, we can
easily recover the rank of the deformation--matrix $\vec{U}$. The results are
shown in the histogram in figure \ref{fig:histrank3d}. Especially in the centre
of the sphere and in the corners of our grid we effectively lose
control--points for our parametrization.

This of course yields a worse fit: one should expect a loss in quality, evident
as a higher reconstruction--error, compared to a grid in which those
control--points are put to use. Sadly we could not run an in--depth test on
this due to computational limitations.

Nevertheless this hints at the notion that *variability* is a good measure for
the overall quality of a fit.
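The correlation values backing this claim --- like all other table entries in
this chapter --- are negated Spearman coefficients with their p--values. A
minimal sketch of how such an entry is computed (Python/scipy; the per--run
data below are synthetic stand--ins, not our measurements):

```python
import numpy as np
from scipy.stats import spearmanr

rng = np.random.default_rng(42)

# Synthetic stand-ins: one variability value and one final fitting error
# per evolutionary run.
variability_per_run = rng.uniform(0.5, 1.0, size=100)
error_per_run = 120.0 - 40.0 * variability_per_run + rng.normal(0.0, 5.0, 100)

# We report -r_S, so that a positive table entry reads as "higher
# variability correlates with a lower fitting error".
r_s, p_value = spearmanr(variability_per_run, error_per_run)
print(f"-r_S = {-r_s:.2f}, p = {p_value:.4f}")
```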
### Regularity

\begin{table}[tbh]
\centering
\begin{tabular}{c|c|c|c}
 & $5 \times 4 \times 4$ & $7 \times 4 \times 4$ & $\mathrm{X} \times 4 \times 4$ \\
\cline{2-4}
 & \textcolor{red}{0.15} (0.147) & \textcolor{red}{0.09} (0.37) & 0.46 (0) \B \\
\cline{2-4}
\multicolumn{4}{c}{} \\[-1.4em]
\hline
$4 \times 4 \times 4$ & $4 \times 4 \times 5$ & $4 \times 4 \times 7$ & $4 \times 4 \times \mathrm{X}$ \T \\
\hline
0.38 (0) & \textcolor{red}{0.17} (0.09) & 0.40 (0) & 0.46 (0) \B \\
\hline
\multicolumn{4}{c}{} \\[-1.4em]
\cline{2-4}
 & $5 \times 5 \times 5$ & $6 \times 6 \times 6$ & $\mathrm{Y} \times \mathrm{Y} \times \mathrm{Y}$ \T \\
\cline{2-4}
 & \textcolor{red}{-0.18} (0.0775) & \textcolor{red}{-0.13} (0.1715) & -0.25 (0) \B \\
\cline{2-4}
\multicolumn{4}{c}{} \\[-1.4em]
\cline{2-4}
\multicolumn{3}{c}{} & all: 0.15 (0) \T
\end{tabular}
\caption[Correlation between *regularity* and iterations for 3D]{Correlation
between *regularity* and number of iterations for the 3D fitting scenario.
Displayed are the negated Spearman coefficients with the corresponding p--values
in brackets for various given grids ($\mathrm{X} \in [4,5,7], \mathrm{Y} \in [4,5,6]$).
\newline Note: Non--significant results are marked in \textcolor{red}{red}.}
\label{tab:3dreg}
\end{table}

In contrast to the predictions of *variability*, our test on *regularity* gave
a mixed result --- similar to the 1D--case.

In roughly half of the scenarios we have a *significant*, but *weak* to
*moderate* correlation between *regularity* and the number of iterations. On
the other hand, in the scenarios where we increased the number of
control--points, namely $125$ for the $5 \times 5 \times 5$ grid and $216$ for
the $6 \times 6 \times 6$ grid, we found a *significant*, but *weak*
**anti**--correlation when taking all three tests into account^[Displayed as
$Y \times Y \times Y$], which seems to contradict the findings/trends for the
sets with $64$, $80$, and $112$ control--points (first two rows of table
\ref{tab:3dreg}).

Taking all results together, we only find a *very weak*, but *significant* link
between *regularity* and the number of iterations needed for the algorithm to
converge.

\begin{figure}[!htb]
\centering
\includegraphics[width=\textwidth]{img/evolution3d/regularity_montage.png}
\caption[Regularity for different 3D--grids]{
Plots of *regularity* against number of iterations for various scenarios together
with a linear fit to indicate trends.}
\label{fig:resreg3d}
\end{figure}

As can be seen from figure \ref{fig:resreg3d}, increasing the number of
control--points helps the convergence--speed. The regularity--criterion first
behaves as we would like it to, but then switches to behaving exactly opposite
to our expectations, as can be seen in the first three plots. While the number
of control--points increases from red to green to blue and the number of
iterations decreases, the *regularity* seems to increase at first, but then
decreases again at higher grid--resolutions.

This can be an artefact of the definition of *regularity*, as it is defined by
the inverse condition--number of the deformation--matrix $\vec{U}$, being the
fraction $\frac{\sigma_{\mathrm{min}}}{\sigma_{\mathrm{max}}}$ between the
least and greatest right singular value.

As we observed in the previous section, we cannot guarantee that each
control--point has an effect (see figure \ref{fig:histrank3d}), so a small
minimal right singular value occurring at higher grid--resolutions seems to be
the likely problem.

Adding to this, we also noted that in the case of the
$10 \times 10 \times 10$--grid the *regularity* was always $0$, as a
non--contributing control--point yields a $0$--column in the
deformation--matrix, thus letting $\sigma_\mathrm{min} = 0$. A better
definition of *regularity* (i.e. using the smallest non--zero right singular
value) could solve this particular issue, but not fix the trend we noticed
above.
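Both the original and the suggested alternative definition are cheap to compute
from the singular values of $\vec{U}$. A minimal sketch (Python/numpy; the
`skip_zero` flag and its threshold are our illustrative naming, not part of the
original definition):

```python
import numpy as np

def regularity(U, skip_zero=False, eps=1e-12):
    """Inverse condition number sigma_min / sigma_max of U.

    With skip_zero=True the smallest *non-zero* singular value is used
    instead, as suggested above for grids whose unused control points
    produce zero columns (and hence sigma_min = 0)."""
    sigma = np.linalg.svd(U, compute_uv=False)  # sorted in descending order
    if skip_zero:
        sigma = sigma[sigma > eps]
    return sigma[-1] / sigma[0]

# A zero column (an unused control point) forces the regularity to 0:
U = np.array([[1.0, 0.0, 0.0],
              [0.0, 1.0, 0.0],
              [0.3, 0.2, 0.0]])
print(regularity(U))                  # 0.0
print(regularity(U, skip_zero=True))  # > 0, smallest non-zero sigma is used
```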
### Improvement Potential

\begin{table}[tbh]
\centering
\begin{tabular}{c|c|c|c}
 & $5 \times 4 \times 4$ & $7 \times 4 \times 4$ & $\mathrm{X} \times 4 \times 4$ \\
\cline{2-4}
 & 0.3 (0.0023) & \textcolor{red}{0.23} (0.0233) & 0.89 (0) \B \\
\cline{2-4}
\multicolumn{4}{c}{} \\[-1.4em]
\hline
$4 \times 4 \times 4$ & $4 \times 4 \times 5$ & $4 \times 4 \times 7$ & $4 \times 4 \times \mathrm{X}$ \T \\
\hline
0.5 (0) & 0.38 (0) & 0.32 (0.0012) & 0.9 (0) \B \\
\hline
\multicolumn{4}{c}{} \\[-1.4em]
\cline{2-4}
 & $5 \times 5 \times 5$ & $6 \times 6 \times 6$ & $\mathrm{Y} \times \mathrm{Y} \times \mathrm{Y}$ \T \\
\cline{2-4}
 & 0.47 (0) & \textcolor{red}{-0.01} (0.8803) & 0.89 (0) \B \\
\cline{2-4}
\multicolumn{4}{c}{} \\[-1.4em]
\cline{2-4}
\multicolumn{3}{c}{} & all: 0.95 (0) \T
\end{tabular}
\caption[Correlation between *improvement potential* and fitting--error for 3D]{Correlation
between *improvement potential* and fitting--error for the 3D fitting scenario.
Displayed are the negated Spearman coefficients with the corresponding p--values
in brackets for various given grids ($\mathrm{X} \in [4,5,7], \mathrm{Y} \in [4,5,6]$).
\newline Note: Non--significant results are marked in \textcolor{red}{red}.}
\label{tab:3dimp}
\end{table}

In contrast to the 1D--scenario, we do not know the optimal solution to the
given problem, so for the calculation we only use the initial gradient produced
by the initial correspondences between both objects. This gradient changes with
every iteration and will quickly deviate from our first guess. This is the
reason we are not trying to create artificially bad gradients, as we have a
broad range in quality of such gradients anyway.

\begin{figure}[htb]
\centering
\includegraphics[width=\textwidth]{img/evolution3d/improvement_montage.png}
\caption[Improvement potential for different 3D--grids]{
Plots of *improvement potential* against error given by our *fitness--function*
after convergence together with a linear fit of each of the plotted data to
indicate trends.}
\label{fig:resimp3d}
\end{figure}

We plotted our findings on the *improvement potential* in a similar way as we
did before with the *regularity*. In figure \ref{fig:resimp3d} one can clearly
see the correlation, the spread within each setup, and the behaviour when we
increase the number of control--points.

Along with this we also give the Spearman--coefficients with their p--values in
table \ref{tab:3dimp}. Within one scenario we only find a *weak* to *moderate*
correlation between the *improvement potential* and the fitting error, but all
findings (except for $7 \times 4 \times 4$ and $6 \times 6 \times 6$) are
significant.

If we take multiple datasets into account, the correlation is *very strong* and
*significant*, which is good, as this functions as a litmus--test: the quality
is naturally tied to the number of control--points.

All in all, the *improvement potential* seems to be a good and sensible measure
of quality, even given gradients of varying quality.

Lastly, a small note on the behaviour of *improvement potential* and
convergence speed, as we used this in the 1D case to argue why the *regularity*
defied our expectations. As a contrast we wanted to show that *improvement
potential* cannot serve as a good predictor of the convergence speed. In figure
\ref{fig:imp1d3d} we show *improvement potential* against the number of
iterations for both scenarios. As one can see, in the 1D scenario we have a
*strong* and *significant* correlation (with $-r_S = -0.72$, $p = 0$), whereas
in the 3D scenario we have the opposite *significant* and *strong* effect (with
$-r_S = 0.69$, $p=0$), so these correlations clearly depend on the scenario and
are not suited for generalization.

\begin{figure}[hbt]
\centering
\includegraphics[width=\textwidth]{img/imp1d3d.png}
\caption[Improvement potential and convergence speed\newline for 1D and 3D--scenarios]{
\newline
Left: *Improvement potential* against convergence speed for the
1D--scenario\newline
Right: *Improvement potential* against convergence speed for the 3D--scenario
}
\label{fig:imp1d3d}
\end{figure}
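For completeness, a minimal sketch of how an *improvement potential* value can
be obtained from a guessed gradient $\vec{G}$. We assume here the
projector--based form
$1 - \|(\vec{1} - \vec{U}\vec{U}^{+})\,\vec{G}\|^2 / \|\vec{G}\|^2$, i.e. the
share of $\vec{G}$ that lies in the column space of $\vec{U}$; the
normalization by $\|\vec{G}\|^2$ is our choice for this sketch (Python/numpy):

```python
import numpy as np

def improvement_potential(U, G):
    """Share of the guessed gradient G that the deformation can realize.

    U @ pinv(U) is the orthogonal projector onto the column space of U;
    the part of G outside that span cannot be reached by moving control
    points, so a smaller residual means a higher improvement potential."""
    residual = G - U @ (np.linalg.pinv(U) @ G)
    return 1.0 - np.linalg.norm(residual) ** 2 / np.linalg.norm(G) ** 2
```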
# Discussion and outlook
\label{sec:dis}

In this thesis we took a look at the different criteria for *evolvability* as
introduced by Richter et al.\cite{anrichterEvol}, namely *variability*,
*regularity* and *improvement potential*, under different setup--conditions.
Where Richter et al. used \acf{RBF}, we employed \acf{FFD} to set up a
low--complexity parametrization of a more complex vertex--mesh.

In our findings we could show in the 1D--scenario that there were statistically
*significant*, *very strong* correlations between *variability and fitting
error* ($0.94$) and *improvement potential and fitting error* ($1.0$),
comparable to the results of Richter et al. ($0.31$ to $0.88$ for the former
and $0.75$ to $0.99$ for the latter), whereas we found only *weak* correlations
for *regularity and convergence--speed* ($0.28$) as opposed to Richter et al.
with $0.39$ to $0.91$.^[We only took statistically *significant* results into
consideration when compiling these numbers. Details are given in the respective
chapters.]

For the 3D--scenario our results show a *very strong*, *significant*
correlation between *variability and fitting error* with $0.89$ to $0.94$,
which is largely in line with the findings of Richter et al. ($0.65$ to
$0.95$). The correlation between *improvement potential and fitting error*
behaves similarly, with our findings having a significant coefficient of $0.3$
to $0.95$ depending on the grid--resolution, compared to the $0.61$ to $0.93$
from Richter et al. In the case of the correlation of *regularity and
convergence speed* we found very different (and often non--significant)
correlations and anti--correlations ranging from $-0.25$ to $0.46$, whereas
Richter et al. reported correlations between $0.34$ and $0.87$.

Taking these results into consideration, one can say that *variability* and
*improvement potential* are very good estimates for the quality of a fit using
\acf{FFD} as a deformation function, while we could not reproduce the
compelling results of Richter et al. for *regularity and convergence speed*.

One reason for the bad or erratic behaviour of the *regularity*--criterion
could be that in an \ac{FFD}--setting some control--points are likely to
contribute only negligibly to the whole parametrization, resulting in very
small right singular values of the deformation--matrix $\vec{U}$ that distort
the condition--number and thus the *regularity* in a significant way. Further
research is needed to refine *regularity* so that these problems are addressed,
for example by taking all singular values into account when capturing the
notion of *regularity*.

Richter et al. also compared the behaviour of direct and indirect manipulation
in \cite{anrichterEvol}, whereas we merely used an indirect \ac{FFD}--approach.
As direct manipulations tend to perform better than indirect manipulations, the
usage of \acf{DM--FFD} could also work better with the criteria we examined.
This could also solve the problem of bad singular values for the *regularity*,
as the incorporation of the parametrization of the points on the surface ---
which are the essential part of a direct manipulation --- could cancel out a
bad control--grid, as the bad control--points are never or only negligibly used
to parametrize those surface--points.
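The core of such a direct manipulation is again a small least--squares problem:
given desired displacements of points *on the surface*, one solves for the
control--point displacements that realize them as closely as possible. A
minimal sketch of this step (Python/numpy; our naming, not the \ac{DM--FFD}
implementation referenced above):

```python
import numpy as np

def direct_manipulation_step(U, delta_surface):
    """Control-point displacements that best realize the desired surface
    displacements in the least-squares sense: delta_P = U^+ delta_S.

    Control points that barely influence the surface correspond to
    near-zero columns of U and receive next to no displacement, which is
    why direct manipulation can mask a badly placed control grid."""
    return np.linalg.pinv(U) @ delta_surface
```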
diff --git a/presentation/template/agcg-pdf.css b/presentation/template/agcg-pdf.css new file mode 100755 index 0000000..360cd02 --- /dev/null +++ b/presentation/template/agcg-pdf.css @@ -0,0 +1,16 @@ +/* MARIO */ +.reveal .comment { + position: absolute; + margin: auto; + right: auto; + bottom: auto; + width: auto; + height: auto; +} + +.reveal footer { + position: absolute; + display: block; + overflow: visible; + margin: auto; +} diff --git a/presentation/template/agcg.css b/presentation/template/agcg.css new file mode 100755 index 0000000..82c03d7 --- /dev/null +++ b/presentation/template/agcg.css @@ -0,0 +1,725 @@ +/** + * White theme for reveal.js. This is the opposite of the 'black' theme. + * + * By Hakim El Hattab, http://hakim.se + */ + + +@import url(lato/lato.css); + + + +/********************************************* + * GLOBAL STYLES + *********************************************/ + +body { + background: #ffffff; + background-color: #ffffff; +} + +.reveal { + font-family: "Lato", sans-serif; + font-size: 36px; + font-weight: normal; + color: #222; } + + +::selection { + color: #fff; + background: #98bdef; + text-shadow: none; } + +.reveal .slides > section, +.reveal .slides > section > section { + line-height: 1.3; + font-weight: inherit; +} + + +/********************************************* + * MARIO: ALIGNMENT & COLUMNS + *********************************************/ + +.reveal .float-left { + float: left; +} + +.reveal .float-right { + float: right; +} + +.reveal .left { + text-align: left; +} + +.reveal .center { + text-align: center; +} + +.reveal .right { + text-align: right; +} + +.reveal .small { + font-size: 0.7em; +} + +.reveal .tiny { + font-size: 0.6em; +} + +.reveal .tt { + font-family: monospace; +} + +.reveal .boxed { + border: 1px solid black; + padding: 10px; +} + +.reveal .w10 { width:10%; } +.reveal .w20 { width:20%; } +.reveal .w30 { width:30%; } +.reveal .w40 { width:40%; } +.reveal .w50 { width:50%; } +.reveal .w60 { width:60%; } +.reveal .w70 { width:70%; } +.reveal .w80 { width:80%; } +.reveal .w90 { width:90%; } + +.reveal .col10 { float:left; width:10%; } +.reveal .col20 { float:left; width:20%; } +.reveal .col30 { float:left; width:30%; } +.reveal .col40 { float:left; width:40%; } +.reveal .col50 { float:left; width:50%; } +.reveal .col60 { float:left; width:60%; } +.reveal .col70 { float:left; width:70%; } +.reveal .col80 { float:left; width:80%; } +.reveal .col90 { float:left; width:90%; } + + +/********************************************* + * HEADERS + *********************************************/ +.reveal h1, +.reveal h2, +.reveal h3, +.reveal h4, +.reveal h5, +.reveal h6 { + margin: 0 0 20px 0; + font-weight: bold; + line-height: 1.2; + letter-spacing: normal; + text-shadow: none; + word-wrap: break-word; + text-align: center; +} + +.reveal h1 { + margin-bottom: 40px; + font-size: 1.50em; +} + +.reveal h2 { + font-size: 1.3em; } + +.reveal h3 { + font-size: 1.1em; } + +.reveal h4 { + font-size: 1em; } + +.reveal h1 { + text-shadow: none; } + +/********************************************* + * OTHER + *********************************************/ +.reveal p { + margin: 20px 0; + line-height: 1.3; } + +/* Ensure certain elements are never larger than the slide itself */ +.reveal img, +.reveal video, +.reveal iframe { + max-width: 95%; + max-height: 95%; } + +.reveal strong { + font-weight: bold; +} + +.reveal .boldblue { + font-weight: bold; + color: #2a9ddf; +} + +.reveal b { + font-weight: bold; } + +.reveal em { + font-style: italic; } + 
+.reveal del { + color: red; +} + +.reveal ol, +.reveal dl, +.reveal ul { + display: inline-block; + text-align: left; + margin-bottom: 1em; +} + +.reveal ol li, +.reveal dl li, +.reveal ul li { + margin-bottom: 0.5em; + margin-left: 40px; +} + +.reveal ol { + list-style-type: decimal; } + +.reveal ul { + list-style-type: disc; } + +.reveal ul ul, +.reveal ol ul { + list-style-type: circle; +} + +.reveal ol ol { + list-style-type: lower-roman; +} + +.reveal ul ul ul, +.reveal ul ul ol, +.reveal ul ol ul, +.reveal ul ol ol, +.reveal ol ul ul, +.reveal ol ul ol, +.reveal ol ol ul, +.reveal ol ol ol { + font-size: 0.9em; +} + +.reveal ul ul, +.reveal ul ol, +.reveal ol ol, +.reveal ol ul { + display: block; + margin-top: 0.3em; + font-size: 0.8em; + margin-left: 10px; +} + +.reveal dt { + font-weight: bold; } + +.reveal dd { + margin-left: 40px; } + +.reveal q, +.reveal blockquote { + quotes: none; } + +.reveal blockquote { + display: block; + text-align: left; + font-size: 0.9em; + position: relative; + width: 70%; + margin: 20px auto; + padding: 0px 15px; + font-style: italic; + background: rgba(255, 255, 255, 0.05); + box-shadow: 0px 0px 2px rgba(0, 0, 0, 0.2); } + +.reveal blockquote p:first-child, +.reveal blockquote p:last-child { + display: inline-block; } + +.reveal q { + font-style: italic; } + + +/********************************************* + * CODE + *********************************************/ + +/* box around block of code */ +.reveal pre { + display: block; + position: relative; + width: 100%; + margin: 10px auto; + text-align: center; +} + +/* inline code */ +.reveal code { + display: inline; + padding: 0.2em; + text-align: left; + font-size: 85%; + font-family: monospace; + word-wrap: normal; + background-color: #f7f7f7; +} + +/* block of code (has to override above inline code)*/ +.reveal pre code { + display: block; + margin: 10px; + text-align: left; + font-size: 0.5em; + font-family: monospace; + line-height: 1.45em; + padding: 16px; + overflow: auto; + max-height: 550px; + word-wrap: normal; + background-color: #f7f7f7; +} + +.reveal pre code.small { + font-size: 0.4em; +} + + +/********************************************* + * TABLES + *********************************************/ + +.reveal table { + margin: auto; + border-collapse: collapse; + border-spacing: 2px; + //border-top: 2px solid black; + //border-bottom: 2px solid black; + text-align: center; +} + +.reveal table th { + font-weight: bold; } + +.reveal table th { + border-bottom: 1px solid; +} + +.reveal table th, +.reveal table td { + text-align: left; + padding: 0.2em 0.5em 0.2em 0.5em; +} + +.reveal table th[align="center"], +.reveal table td[align="center"] { + text-align: center; } + +.reveal table th[align="right"], +.reveal table td[align="right"] { + text-align: right; } + +.reveal table tbody tr:last-child th, +.reveal table tbody tr:last-child td { + border-bottom: none; } + +.reveal sup { + vertical-align: super; + font-size: 0.7em; +} + +.reveal sub { + vertical-align: sub; + font-size: 0.7em; +} + +.reveal small { + display: inline-block; + font-size: 0.6em; + line-height: 1.2em; + vertical-align: top; } + +.reveal small * { + vertical-align: top; } + +/********************************************* + * LINKS + *********************************************/ +.reveal a { + color: #2a76dd; + text-decoration: none; + -webkit-transition: color .15s ease; + -moz-transition: color .15s ease; + transition: color .15s ease; } + +.reveal a:hover { + color: #6ca0e8; + text-shadow: none; + border: none; 
} + +.reveal .roll span:after { + color: #fff; + background: #1a53a1; } + +/********************************************* + * IMAGES + *********************************************/ +/* +.reveal section img { + margin: 15px 0px; + background: rgba(255, 255, 255, 0.12); + border: 4px solid #222; + box-shadow: 0 0 10px rgba(0, 0, 0, 0.15); } +*/ + +.reveal section img { + /*margin: 15px 0px;*/ + margin-bottom: 15px; + border: 0; + box-shadow: none; } + +.reveal section img.plain { + border: 0; + box-shadow: none; } + +.reveal a img { + -webkit-transition: all .15s linear; + -moz-transition: all .15s linear; + transition: all .15s linear; } + +.reveal a:hover img { + background: rgba(255, 255, 255, 0.2); + border-color: #2a76dd; + box-shadow: 0 0 20px rgba(0, 0, 0, 0.55); } + + +.reveal figure { + display: inline-block; +// vertical-align: top; +} + +.reveal figure img { + margin: 5px 0px 0px 0px; + border: 0; + box-shadow: none; +} + +.reveal figure figcaption { + margin: 0px; + line-height: 1.2; + font-style: italic; + font-size: 0.6em; + text-align: center; +} + + +/********************************************* + * NAVIGATION CONTROLS + *********************************************/ +.reveal .controls .navigate-left, +.reveal .controls .navigate-left.enabled { + border-right-color: #2a76dd; } + +.reveal .controls .navigate-right, +.reveal .controls .navigate-right.enabled { + border-left-color: #2a76dd; } + +.reveal .controls .navigate-up, +.reveal .controls .navigate-up.enabled { + border-bottom-color: #2a76dd; } + +.reveal .controls .navigate-down, +.reveal .controls .navigate-down.enabled { + border-top-color: #2a76dd; } + +.reveal .controls .navigate-left.enabled:hover { + border-right-color: #6ca0e8; } + +.reveal .controls .navigate-right.enabled:hover { + border-left-color: #6ca0e8; } + +.reveal .controls .navigate-up.enabled:hover { + border-bottom-color: #6ca0e8; } + +.reveal .controls .navigate-down.enabled:hover { + border-top-color: #6ca0e8; } + +/********************************************* + * PROGRESS BAR + *********************************************/ +.reveal .progress { + background: rgba(0, 0, 0, 0.2); } + +.reveal .progress span { + background: #2a9ddf; + -webkit-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985); + -moz-transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985); + transition: width 800ms cubic-bezier(0.26, 0.86, 0.44, 0.985); } + + + +/********************************************* + * MARIO: TITLE SLIDE + *********************************************/ + +.white-on-blue { + color: #ffffff; + background-color: #2a9ddf; +} + +.reveal .title { + margin: 30px 0 0 0; + font-weight: bold; + font-style: normal; + font-size: 1.5em; + text-align: center; +} + +.reveal .subtitle { + margin: 10px 0 0 0; + font-weight: normal; + font-style: italic; + font-size: 1.3em; + text-align: center; +} + +.reveal .author { + margin: 50px 0 0 0; + font-weight: normal; + font-style: normal; + font-size: 1.0em; + text-align: center; +} + +.reveal .affiliation { + margin: 10px 0 30px 0; + font-weight: normal; + font-style: normal; + font-size: 1.0em; + text-align: center; +} + + +/********************************************* + * MARIO: SECTIONS + *********************************************/ + +.reveal .section-title h1 { + /* white on blue */ + color: #ffffff; + background-color: #2a9ddf; + + /* large top margin -> vertical centering */ + margin: 100px auto; + padding: 50px; + + /* large bold font */ + font-weight: bold; + font-style: normal; + font-size: 
1.5em; +} + + +/********************************************* + * MARIO: FRAGMENT ANIMATION + *********************************************/ + +.fragment.current-visible.visible:not(.current-fragment) { + display: none; + height:0px; + line-height: 0px; + font-size: 0px; +} + + +/********************************************* + * MARIO: WER WIRD MILLIONAER + *********************************************/ + +.reveal .slides > section { + counter-reset: wwm-counter; +} + +.reveal .answer { + display: inline-block; + position: relative; + width: 400px; + text-align: left; + margin: 20px; + border: 3px solid #2a9ddf; + border-radius: 20px; + padding: 20px; + font-weight: normal; + color: black; +} + +.reveal .answer:before { + content: counter(wwm-counter, upper-latin) ": "; + counter-increment: wwm-counter; + margin-right: 0.5em; + font-weight: bold; +} + +.reveal .tooltip { + visibility: hidden; + max-width: 390px; + top: 100%; + background-color: grey; + color: #ffffff; + text-align: center; + padding: 5px; + border-radius: 6px; + position: absolute; + z-index: 1; + font-size: 0.5em; +} + +.reveal .show-wrong { + background-color: #ffaaaa; + border: 3px solid red; +} + +.reveal .show-right { + background-color: #aaffaa; + border: 3px solid green; +} + +.reveal .show-wrong:hover .tooltip, .reveal .show-right:hover .tooltip { + visibility: visible; +} + + + +/********************************************* + * MARIO: COMMENT BUBBLES + *********************************************/ + +.reveal .bubble { + color: #ffffff; + background-color: #2a9ddf; + padding:10px; + border-radius:5px; + box-shadow: 0px 0px 30px 0px rgba(0,0,0,0.35); +} + +.reveal .comment { + color: #ffffff; + background-color: #2a9ddf; + position:absolute; + padding:10px; + border-radius:5px; + font-size: 0.5em; + box-shadow: 0px 0px 30px 0px rgba(0,0,0,0.35); +} + +.reveal .comment-left { + color: #ffffff; + background-color: #2a9ddf; + position:absolute; + padding:5px; + font-size: 80%; + border-radius:20px 20px 0px 20px; + box-shadow: 0px 0px 30px 0px rgba(0,0,0,0.35); +} + +.reveal .comment-right { + color: #ffffff; + background-color: #2a9ddf; + position:absolute; + padding:5px; + font-size: 80%; + border-radius:20px 20px 20px 0px; + box-shadow: 0px 0px 30px 0px rgba(0,0,0,0.35); +} + +.reveal .comment-big { + color: #ffffff; + background-color: #2a9ddf; + padding:10px; + border-radius:5px; + box-shadow: 0px 0px 30px 0px rgba(0,0,0,0.35); +} + + +/********************************************* + * MARIO: FOOTER COMMENTS + *********************************************/ + +.reveal footer { + position: absolute; + text-align: center; + margin: auto; + padding: 5px; + font-size: 0.5em; + left: 200px; + width: 624px; + top: 750px; +} + +/********************************************* + * MARIO: GENERIC HIGHLIGHT + *********************************************/ + +.reveal .highlight { + background-color: #fdfbaa; +} + +/********************************************* + * MARIO: margin: auto + *********************************************/ + +.reveal .automargin { + margin: 10px auto; +} + +/********************************************* + * MARIO: ADD SHADOWS (e.g. 
to images) + *********************************************/ + +.reveal .shadow { + box-shadow: 0px 0px 10px 0px rgba(0,0,0,0.25); +} + +.reveal table p { + margin: 0px; +} + +.reveal .neg80 { + margin-top: -80px; +} + +.reveal .neg60 { + margin-top: -60px; +} + +/* class for aligning divs in a row */ +.reveal .ilb { + display: inline-block; + vertical-align: top; +} + +/********************************************* + * MARIO: MathJax equation should get some + vertical distance to text before them + *********************************************/ + +.reveal .MathJax_Display { + margin-top: 0.5em; +} + + +/********************************************* + * Stefan: small helper class for doing + * a clear: both; after columns. + *********************************************/ + +.reveal .clearFloat { + clear:both; +} diff --git a/presentation/template/font-awesome b/presentation/template/font-awesome new file mode 160000 index 0000000..c754197 --- /dev/null +++ b/presentation/template/font-awesome @@ -0,0 +1 @@ +Subproject commit c754197d91b9b615349ff22fdae393905015ffff diff --git a/presentation/template/lato/LatoLatin-Black.eot b/presentation/template/lato/LatoLatin-Black.eot new file mode 100755 index 0000000..d41103b Binary files /dev/null and b/presentation/template/lato/LatoLatin-Black.eot differ diff --git a/presentation/template/lato/LatoLatin-Black.ttf b/presentation/template/lato/LatoLatin-Black.ttf new file mode 100755 index 0000000..45c55e4 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Black.ttf differ diff --git a/presentation/template/lato/LatoLatin-Black.woff b/presentation/template/lato/LatoLatin-Black.woff new file mode 100755 index 0000000..d1e2579 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Black.woff differ diff --git a/presentation/template/lato/LatoLatin-Black.woff2 b/presentation/template/lato/LatoLatin-Black.woff2 new file mode 100755 index 0000000..4127b4d Binary files /dev/null and b/presentation/template/lato/LatoLatin-Black.woff2 differ diff --git a/presentation/template/lato/LatoLatin-BlackItalic.eot b/presentation/template/lato/LatoLatin-BlackItalic.eot new file mode 100755 index 0000000..74ea622 Binary files /dev/null and b/presentation/template/lato/LatoLatin-BlackItalic.eot differ diff --git a/presentation/template/lato/LatoLatin-BlackItalic.ttf b/presentation/template/lato/LatoLatin-BlackItalic.ttf new file mode 100755 index 0000000..76563c2 Binary files /dev/null and b/presentation/template/lato/LatoLatin-BlackItalic.ttf differ diff --git a/presentation/template/lato/LatoLatin-BlackItalic.woff b/presentation/template/lato/LatoLatin-BlackItalic.woff new file mode 100755 index 0000000..142c1c9 Binary files /dev/null and b/presentation/template/lato/LatoLatin-BlackItalic.woff differ diff --git a/presentation/template/lato/LatoLatin-BlackItalic.woff2 b/presentation/template/lato/LatoLatin-BlackItalic.woff2 new file mode 100755 index 0000000..e9862e6 Binary files /dev/null and b/presentation/template/lato/LatoLatin-BlackItalic.woff2 differ diff --git a/presentation/template/lato/LatoLatin-Bold.eot b/presentation/template/lato/LatoLatin-Bold.eot new file mode 100755 index 0000000..d90b47b Binary files /dev/null and b/presentation/template/lato/LatoLatin-Bold.eot differ diff --git a/presentation/template/lato/LatoLatin-Bold.ttf b/presentation/template/lato/LatoLatin-Bold.ttf new file mode 100755 index 0000000..c598c24 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Bold.ttf differ diff --git 
a/presentation/template/lato/LatoLatin-Bold.woff b/presentation/template/lato/LatoLatin-Bold.woff new file mode 100755 index 0000000..cdfcbe0 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Bold.woff differ diff --git a/presentation/template/lato/LatoLatin-Bold.woff2 b/presentation/template/lato/LatoLatin-Bold.woff2 new file mode 100755 index 0000000..2615c85 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Bold.woff2 differ diff --git a/presentation/template/lato/LatoLatin-BoldItalic.eot b/presentation/template/lato/LatoLatin-BoldItalic.eot new file mode 100755 index 0000000..17216ef Binary files /dev/null and b/presentation/template/lato/LatoLatin-BoldItalic.eot differ diff --git a/presentation/template/lato/LatoLatin-BoldItalic.ttf b/presentation/template/lato/LatoLatin-BoldItalic.ttf new file mode 100755 index 0000000..c1f225a Binary files /dev/null and b/presentation/template/lato/LatoLatin-BoldItalic.ttf differ diff --git a/presentation/template/lato/LatoLatin-BoldItalic.woff b/presentation/template/lato/LatoLatin-BoldItalic.woff new file mode 100755 index 0000000..3e683fe Binary files /dev/null and b/presentation/template/lato/LatoLatin-BoldItalic.woff differ diff --git a/presentation/template/lato/LatoLatin-BoldItalic.woff2 b/presentation/template/lato/LatoLatin-BoldItalic.woff2 new file mode 100755 index 0000000..f7bace1 Binary files /dev/null and b/presentation/template/lato/LatoLatin-BoldItalic.woff2 differ diff --git a/presentation/template/lato/LatoLatin-Hairline.eot b/presentation/template/lato/LatoLatin-Hairline.eot new file mode 100755 index 0000000..6fcd8a5 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Hairline.eot differ diff --git a/presentation/template/lato/LatoLatin-Hairline.ttf b/presentation/template/lato/LatoLatin-Hairline.ttf new file mode 100755 index 0000000..5984ab0 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Hairline.ttf differ diff --git a/presentation/template/lato/LatoLatin-Hairline.woff b/presentation/template/lato/LatoLatin-Hairline.woff new file mode 100755 index 0000000..5392604 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Hairline.woff differ diff --git a/presentation/template/lato/LatoLatin-Hairline.woff2 b/presentation/template/lato/LatoLatin-Hairline.woff2 new file mode 100755 index 0000000..820ac0f Binary files /dev/null and b/presentation/template/lato/LatoLatin-Hairline.woff2 differ diff --git a/presentation/template/lato/LatoLatin-HairlineItalic.eot b/presentation/template/lato/LatoLatin-HairlineItalic.eot new file mode 100755 index 0000000..bd696e2 Binary files /dev/null and b/presentation/template/lato/LatoLatin-HairlineItalic.eot differ diff --git a/presentation/template/lato/LatoLatin-HairlineItalic.ttf b/presentation/template/lato/LatoLatin-HairlineItalic.ttf new file mode 100755 index 0000000..d7d8753 Binary files /dev/null and b/presentation/template/lato/LatoLatin-HairlineItalic.ttf differ diff --git a/presentation/template/lato/LatoLatin-HairlineItalic.woff b/presentation/template/lato/LatoLatin-HairlineItalic.woff new file mode 100755 index 0000000..bcb96fd Binary files /dev/null and b/presentation/template/lato/LatoLatin-HairlineItalic.woff differ diff --git a/presentation/template/lato/LatoLatin-HairlineItalic.woff2 b/presentation/template/lato/LatoLatin-HairlineItalic.woff2 new file mode 100755 index 0000000..69fa81d Binary files /dev/null and b/presentation/template/lato/LatoLatin-HairlineItalic.woff2 differ diff --git 
a/presentation/template/lato/LatoLatin-Heavy.eot b/presentation/template/lato/LatoLatin-Heavy.eot new file mode 100755 index 0000000..d4a5e44 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Heavy.eot differ diff --git a/presentation/template/lato/LatoLatin-Heavy.ttf b/presentation/template/lato/LatoLatin-Heavy.ttf new file mode 100755 index 0000000..6d97505 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Heavy.ttf differ diff --git a/presentation/template/lato/LatoLatin-Heavy.woff b/presentation/template/lato/LatoLatin-Heavy.woff new file mode 100755 index 0000000..82e5027 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Heavy.woff differ diff --git a/presentation/template/lato/LatoLatin-Heavy.woff2 b/presentation/template/lato/LatoLatin-Heavy.woff2 new file mode 100755 index 0000000..24bc6fa Binary files /dev/null and b/presentation/template/lato/LatoLatin-Heavy.woff2 differ diff --git a/presentation/template/lato/LatoLatin-HeavyItalic.eot b/presentation/template/lato/LatoLatin-HeavyItalic.eot new file mode 100755 index 0000000..6ffd2b5 Binary files /dev/null and b/presentation/template/lato/LatoLatin-HeavyItalic.eot differ diff --git a/presentation/template/lato/LatoLatin-HeavyItalic.ttf b/presentation/template/lato/LatoLatin-HeavyItalic.ttf new file mode 100755 index 0000000..4f91d03 Binary files /dev/null and b/presentation/template/lato/LatoLatin-HeavyItalic.ttf differ diff --git a/presentation/template/lato/LatoLatin-HeavyItalic.woff b/presentation/template/lato/LatoLatin-HeavyItalic.woff new file mode 100755 index 0000000..d588f6f Binary files /dev/null and b/presentation/template/lato/LatoLatin-HeavyItalic.woff differ diff --git a/presentation/template/lato/LatoLatin-HeavyItalic.woff2 b/presentation/template/lato/LatoLatin-HeavyItalic.woff2 new file mode 100755 index 0000000..0a95552 Binary files /dev/null and b/presentation/template/lato/LatoLatin-HeavyItalic.woff2 differ diff --git a/presentation/template/lato/LatoLatin-Italic.eot b/presentation/template/lato/LatoLatin-Italic.eot new file mode 100755 index 0000000..403b9a5 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Italic.eot differ diff --git a/presentation/template/lato/LatoLatin-Italic.ttf b/presentation/template/lato/LatoLatin-Italic.ttf new file mode 100755 index 0000000..c61fc07 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Italic.ttf differ diff --git a/presentation/template/lato/LatoLatin-Italic.woff b/presentation/template/lato/LatoLatin-Italic.woff new file mode 100755 index 0000000..d8cf84c Binary files /dev/null and b/presentation/template/lato/LatoLatin-Italic.woff differ diff --git a/presentation/template/lato/LatoLatin-Italic.woff2 b/presentation/template/lato/LatoLatin-Italic.woff2 new file mode 100755 index 0000000..aaa5a35 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Italic.woff2 differ diff --git a/presentation/template/lato/LatoLatin-Light.eot b/presentation/template/lato/LatoLatin-Light.eot new file mode 100755 index 0000000..865537d Binary files /dev/null and b/presentation/template/lato/LatoLatin-Light.eot differ diff --git a/presentation/template/lato/LatoLatin-Light.ttf b/presentation/template/lato/LatoLatin-Light.ttf new file mode 100755 index 0000000..6af1b85 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Light.ttf differ diff --git a/presentation/template/lato/LatoLatin-Light.woff b/presentation/template/lato/LatoLatin-Light.woff new file mode 100755 index 
0000000..e7d4278 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Light.woff differ diff --git a/presentation/template/lato/LatoLatin-Light.woff2 b/presentation/template/lato/LatoLatin-Light.woff2 new file mode 100755 index 0000000..b6d0288 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Light.woff2 differ diff --git a/presentation/template/lato/LatoLatin-LightItalic.eot b/presentation/template/lato/LatoLatin-LightItalic.eot new file mode 100755 index 0000000..52ee50c Binary files /dev/null and b/presentation/template/lato/LatoLatin-LightItalic.eot differ diff --git a/presentation/template/lato/LatoLatin-LightItalic.ttf b/presentation/template/lato/LatoLatin-LightItalic.ttf new file mode 100755 index 0000000..b881036 Binary files /dev/null and b/presentation/template/lato/LatoLatin-LightItalic.ttf differ diff --git a/presentation/template/lato/LatoLatin-LightItalic.woff b/presentation/template/lato/LatoLatin-LightItalic.woff new file mode 100755 index 0000000..bb72fd2 Binary files /dev/null and b/presentation/template/lato/LatoLatin-LightItalic.woff differ diff --git a/presentation/template/lato/LatoLatin-LightItalic.woff2 b/presentation/template/lato/LatoLatin-LightItalic.woff2 new file mode 100755 index 0000000..fc21432 Binary files /dev/null and b/presentation/template/lato/LatoLatin-LightItalic.woff2 differ diff --git a/presentation/template/lato/LatoLatin-Medium.eot b/presentation/template/lato/LatoLatin-Medium.eot new file mode 100755 index 0000000..34041da Binary files /dev/null and b/presentation/template/lato/LatoLatin-Medium.eot differ diff --git a/presentation/template/lato/LatoLatin-Medium.ttf b/presentation/template/lato/LatoLatin-Medium.ttf new file mode 100755 index 0000000..7ebafb4 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Medium.ttf differ diff --git a/presentation/template/lato/LatoLatin-Medium.woff b/presentation/template/lato/LatoLatin-Medium.woff new file mode 100755 index 0000000..a430a4f Binary files /dev/null and b/presentation/template/lato/LatoLatin-Medium.woff differ diff --git a/presentation/template/lato/LatoLatin-Medium.woff2 b/presentation/template/lato/LatoLatin-Medium.woff2 new file mode 100755 index 0000000..20251fe Binary files /dev/null and b/presentation/template/lato/LatoLatin-Medium.woff2 differ diff --git a/presentation/template/lato/LatoLatin-MediumItalic.eot b/presentation/template/lato/LatoLatin-MediumItalic.eot new file mode 100755 index 0000000..2f87d5c Binary files /dev/null and b/presentation/template/lato/LatoLatin-MediumItalic.eot differ diff --git a/presentation/template/lato/LatoLatin-MediumItalic.ttf b/presentation/template/lato/LatoLatin-MediumItalic.ttf new file mode 100755 index 0000000..6f929df Binary files /dev/null and b/presentation/template/lato/LatoLatin-MediumItalic.ttf differ diff --git a/presentation/template/lato/LatoLatin-MediumItalic.woff b/presentation/template/lato/LatoLatin-MediumItalic.woff new file mode 100755 index 0000000..ab7bbc8 Binary files /dev/null and b/presentation/template/lato/LatoLatin-MediumItalic.woff differ diff --git a/presentation/template/lato/LatoLatin-MediumItalic.woff2 b/presentation/template/lato/LatoLatin-MediumItalic.woff2 new file mode 100755 index 0000000..1a3a3f1 Binary files /dev/null and b/presentation/template/lato/LatoLatin-MediumItalic.woff2 differ diff --git a/presentation/template/lato/LatoLatin-Regular.eot b/presentation/template/lato/LatoLatin-Regular.eot new file mode 100755 index 0000000..96a9035 Binary files /dev/null and 
b/presentation/template/lato/LatoLatin-Regular.eot differ diff --git a/presentation/template/lato/LatoLatin-Regular.ttf b/presentation/template/lato/LatoLatin-Regular.ttf new file mode 100755 index 0000000..bcc5778 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Regular.ttf differ diff --git a/presentation/template/lato/LatoLatin-Regular.woff b/presentation/template/lato/LatoLatin-Regular.woff new file mode 100755 index 0000000..bf73a6d Binary files /dev/null and b/presentation/template/lato/LatoLatin-Regular.woff differ diff --git a/presentation/template/lato/LatoLatin-Regular.woff2 b/presentation/template/lato/LatoLatin-Regular.woff2 new file mode 100755 index 0000000..a4d084b Binary files /dev/null and b/presentation/template/lato/LatoLatin-Regular.woff2 differ diff --git a/presentation/template/lato/LatoLatin-Semibold.eot b/presentation/template/lato/LatoLatin-Semibold.eot new file mode 100755 index 0000000..e97f7b5 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Semibold.eot differ diff --git a/presentation/template/lato/LatoLatin-Semibold.ttf b/presentation/template/lato/LatoLatin-Semibold.ttf new file mode 100755 index 0000000..b9481d1 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Semibold.ttf differ diff --git a/presentation/template/lato/LatoLatin-Semibold.woff b/presentation/template/lato/LatoLatin-Semibold.woff new file mode 100755 index 0000000..5e22897 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Semibold.woff differ diff --git a/presentation/template/lato/LatoLatin-Semibold.woff2 b/presentation/template/lato/LatoLatin-Semibold.woff2 new file mode 100755 index 0000000..1861c24 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Semibold.woff2 differ diff --git a/presentation/template/lato/LatoLatin-SemiboldItalic.eot b/presentation/template/lato/LatoLatin-SemiboldItalic.eot new file mode 100755 index 0000000..4918fbf Binary files /dev/null and b/presentation/template/lato/LatoLatin-SemiboldItalic.eot differ diff --git a/presentation/template/lato/LatoLatin-SemiboldItalic.ttf b/presentation/template/lato/LatoLatin-SemiboldItalic.ttf new file mode 100755 index 0000000..d715514 Binary files /dev/null and b/presentation/template/lato/LatoLatin-SemiboldItalic.ttf differ diff --git a/presentation/template/lato/LatoLatin-SemiboldItalic.woff b/presentation/template/lato/LatoLatin-SemiboldItalic.woff new file mode 100755 index 0000000..ae27f2f Binary files /dev/null and b/presentation/template/lato/LatoLatin-SemiboldItalic.woff differ diff --git a/presentation/template/lato/LatoLatin-SemiboldItalic.woff2 b/presentation/template/lato/LatoLatin-SemiboldItalic.woff2 new file mode 100755 index 0000000..da18241 Binary files /dev/null and b/presentation/template/lato/LatoLatin-SemiboldItalic.woff2 differ diff --git a/presentation/template/lato/LatoLatin-Thin.eot b/presentation/template/lato/LatoLatin-Thin.eot new file mode 100755 index 0000000..2e9ac8d Binary files /dev/null and b/presentation/template/lato/LatoLatin-Thin.eot differ diff --git a/presentation/template/lato/LatoLatin-Thin.ttf b/presentation/template/lato/LatoLatin-Thin.ttf new file mode 100755 index 0000000..7752047 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Thin.ttf differ diff --git a/presentation/template/lato/LatoLatin-Thin.woff b/presentation/template/lato/LatoLatin-Thin.woff new file mode 100755 index 0000000..431e4a7 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Thin.woff differ diff 
--git a/presentation/template/lato/LatoLatin-Thin.woff2 b/presentation/template/lato/LatoLatin-Thin.woff2 new file mode 100755 index 0000000..3f0cc90 Binary files /dev/null and b/presentation/template/lato/LatoLatin-Thin.woff2 differ diff --git a/presentation/template/lato/LatoLatin-ThinItalic.eot b/presentation/template/lato/LatoLatin-ThinItalic.eot new file mode 100755 index 0000000..01a762e Binary files /dev/null and b/presentation/template/lato/LatoLatin-ThinItalic.eot differ diff --git a/presentation/template/lato/LatoLatin-ThinItalic.ttf b/presentation/template/lato/LatoLatin-ThinItalic.ttf new file mode 100755 index 0000000..34f99ba Binary files /dev/null and b/presentation/template/lato/LatoLatin-ThinItalic.ttf differ diff --git a/presentation/template/lato/LatoLatin-ThinItalic.woff b/presentation/template/lato/LatoLatin-ThinItalic.woff new file mode 100755 index 0000000..72902b4 Binary files /dev/null and b/presentation/template/lato/LatoLatin-ThinItalic.woff differ diff --git a/presentation/template/lato/LatoLatin-ThinItalic.woff2 b/presentation/template/lato/LatoLatin-ThinItalic.woff2 new file mode 100755 index 0000000..caafd60 Binary files /dev/null and b/presentation/template/lato/LatoLatin-ThinItalic.woff2 differ diff --git a/presentation/template/lato/lato.css b/presentation/template/lato/lato.css new file mode 100755 index 0000000..f842088 --- /dev/null +++ b/presentation/template/lato/lato.css @@ -0,0 +1,39 @@ +/* Webfont: LatoLatin-Bold */ +@font-face { + font-family: 'Lato'; + src: url('LatoLatin-Bold.eot'); /* IE9 Compat Modes */ + src: url('LatoLatin-Bold.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ + url('LatoLatin-Bold.woff2') format('woff2'), /* Modern Browsers */ + url('LatoLatin-Bold.woff') format('woff'), /* Modern Browsers */ + url('LatoLatin-Bold.ttf') format('truetype'); + font-style: normal; + font-weight: bold; + text-rendering: optimizeLegibility; +} + +/* Webfont: LatoLatin-Italic */ +@font-face { + font-family: 'Lato'; + src: url('LatoLatin-Italic.eot'); /* IE9 Compat Modes */ + src: url('LatoLatin-Italic.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ + url('LatoLatin-Italic.woff2') format('woff2'), /* Modern Browsers */ + url('LatoLatin-Italic.woff') format('woff'), /* Modern Browsers */ + url('LatoLatin-Italic.ttf') format('truetype'); + font-style: italic; + font-weight: normal; + text-rendering: optimizeLegibility; +} + +/* Webfont: LatoLatin-Regular */ +@font-face { + font-family: 'Lato'; + src: url('LatoLatin-Regular.eot'); /* IE9 Compat Modes */ + src: url('LatoLatin-Regular.eot?#iefix') format('embedded-opentype'), /* IE6-IE8 */ + url('LatoLatin-Regular.woff2') format('woff2'), /* Modern Browsers */ + url('LatoLatin-Regular.woff') format('woff'), /* Modern Browsers */ + url('LatoLatin-Regular.ttf') format('truetype'); + font-style: normal; + font-weight: normal; + text-rendering: optimizeLegibility; +} + diff --git a/presentation/template/mathjax b/presentation/template/mathjax new file mode 160000 index 0000000..d4ab1b3 --- /dev/null +++ b/presentation/template/mathjax @@ -0,0 +1 @@ +Subproject commit d4ab1b35c96dd964eaa9e1ed2c86e39fffbdacf6 diff --git a/presentation/template/my-chalkboard/README.md b/presentation/template/my-chalkboard/README.md new file mode 100755 index 0000000..a5a2e71 --- /dev/null +++ b/presentation/template/my-chalkboard/README.md @@ -0,0 +1,123 @@ +# Chalkboard + +With this plugin you can add a chalkboard to reveal.js. 
The plugin provides two possibilities for including handwritten notes in your presentation:
+
+- you can make notes directly on the slides, e.g. to comment on certain aspects,
+- you can open a chalkboard on which you can make notes.
+
+The main use case we had in mind when implementing the plugin is classroom usage, where you may want to explain some course content and quickly need to make some notes.
+
+The plugin records all drawings made so that they can be played back using the ```autoSlide``` feature or the ```audio-slideshow``` plugin.
+
+[Check out the live demo](http://courses.telematique.eu/reveal.js-plugins/chalkboard-demo.html)
+
+The chalkboard effect is based on [Chalkboard](https://github.com/mmoustafa/Chalkboard) by Mohamed Moustafa.
+
+## Installation
+
+Copy the file ```chalkboard.js``` and the ```img``` directory into the plugin folder of your reveal.js presentation, i.e. ```plugin/chalkboard```.
+
+Add the plugin to the dependencies in your presentation as shown below.
+
+```javascript
+Reveal.initialize({
+    // ...
+    chalkboard: {
+        // optionally load pre-recorded chalkboard drawing from file
+        src: "chalkboard.json",
+    },
+    dependencies: [
+        // ...
+        { src: 'plugin/chalkboard/chalkboard.js' },
+        // ...
+    ],
+    keyboard: {
+        67: function() { RevealChalkboard.toggleNotesCanvas() }, // toggle notes canvas when 'c' is pressed
+        66: function() { RevealChalkboard.toggleChalkboard() },  // toggle chalkboard when 'b' is pressed
+        46: function() { RevealChalkboard.clear() },             // clear chalkboard when 'DEL' is pressed
+         8: function() { RevealChalkboard.reset() },             // reset chalkboard data on current slide when 'BACKSPACE' is pressed
+        68: function() { RevealChalkboard.download() },          // download recorded chalkboard drawing when 'd' is pressed
+    },
+    // ...
+
+});
+```
+In order to include buttons for opening and closing the notes canvas or the chalkboard you should make sure that ```font-awesome``` is available. The easiest way is to include
+```html
+<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/font-awesome/4.5.0/css/font-awesome.min.css">
+```
+in the ```head``` section of your HTML file.
+
+## Usage
+
+### Enable & disable
+
+With the above configuration the notes canvas is opened and closed when pressing 'c', and the chalkboard is opened and closed when pressing 'b'.
+
+### Mouse
+- Click the left mouse button and drag to write on the notes canvas or chalkboard
+- Click the right mouse button and drag to wipe away previous drawings
+
+### Touch
+- Touch and move to write on the notes canvas or chalkboard
+- Touch and hold for half a second, then move to wipe away previous drawings
+
+### Keyboard
+- Press the 'DEL' key to clear the chalkboard
+- Press the 'd' key to download chalkboard drawings
+- Press the 'BACKSPACE' key to delete all chalkboard drawings on the current slide
+
+## Playback
+
+If the ```autoSlide``` feature is set or if the ```audio-slideshow``` plugin is used, pre-recorded chalkboard drawings can be played. The slideshow plays back the user interaction with the chalkboard in the same way as it was conducted when recording the data.
+
+## PDF-Export
+
+If the slideshow is opened in [print mode](https://github.com/hakimel/reveal.js/#pdf-export) the pre-recorded chalkboard drawings (which must be provided in a file, see the ```src``` option) are included in the PDF-file. Each drawing on the chalkboard is added after the slide that was shown when opening the chalkboard. Drawings are also included if they had been cleared (using the 'DEL' key). Drawings on the notes canvas are not included in the PDF-file.
+
+
+## Configuration
+
+The plugin has several configuration options:
+
+- ```src```: Optional filename for pre-recorded drawings.
+- ```readOnly```: Configuration option that prevents changes to existing drawings. If set to ```true``` no changes can be made, if set to ```false``` changes can be made, if unset or set to ```undefined``` no changes to the drawings can be made after returning to a slide or fragment for which drawings had been recorded before. In any case the recorded drawings for a slide or fragment can be cleared by pressing the 'DEL' key (i.e. by using the ```RevealChalkboard.clear()``` function).
+- ```toggleNotesButton```: If set to ```true``` a button for opening and closing the notes canvas is shown. Alternatively, the css position attributes can be provided if the default position is not appropriate.
+- ```toggleChalkboardButton```: If set to ```true``` a button for opening and closing the chalkboard is shown. Alternatively, the css position attributes can be provided if the default position is not appropriate.
+- ```transition```: Gives the duration (in milliseconds) of the transition for a slide change, so that the notes canvas is drawn after the transition is completed.
+- ```theme```: Can be set to either ```"chalkboard"``` or ```"whiteboard"```.
+
+The following configuration options allow changing the appearance of the notes canvas and the chalkboard. All of these options require two values: the first gives the value for the notes canvas, the second for the chalkboard.
+
+- ```color```: The first value gives the pen color, the second value gives the color of the chalk.
+- ```background```: The first value expects a (semi-)transparent color which is used to provide visual feedback that the notes canvas is enabled, the second value expects a filename of a background image for the chalkboard.
+- ```pen```: The first value expects a filename for an image of the pen used for the notes canvas, the second value expects a filename for an image of the pen used for the chalkboard.
+
+All of the configurations are optional and the default values shown below are used if the options are not provided.
+
+```javascript
+Reveal.initialize({
+    // ...
+    chalkboard: {
+        src: null,
+        readOnly: undefined,
+        toggleChalkboardButton: { left: "30px", bottom: "30px", top: "auto", right: "auto" },
+        toggleNotesButton: { left: "30px", bottom: "30px", top: "auto", right: "auto" },
+        transition: 800,
+        theme: "chalkboard",
+        // configuration options for notes canvas and chalkboard
+        color: [ 'rgba(0,0,255,1)', 'rgba(255,255,255,0.5)' ],
+        background: [ 'rgba(127,127,127,.1)' , 'reveal.js-plugins/chalkboard/img/blackboard.png' ],
+        pen: [ 'reveal.js-plugins/chalkboard/img/boardmarker.png', 'reveal.js-plugins/chalkboard/img/chalk.png' ],
+    },
+    // ...
+
+});
+```
+
+
+## License
+
+MIT licensed
+
+Copyright (C) 2016 Asvin Goel
diff --git a/presentation/template/my-chalkboard/chalkboard.js b/presentation/template/my-chalkboard/chalkboard.js
new file mode 100755
index 0000000..b478d3c
--- /dev/null
+++ b/presentation/template/my-chalkboard/chalkboard.js
@@ -0,0 +1,1142 @@
+/*****************************************************************
+ ** Author: Asvin Goel, goel@telematique.eu
+ **
+ ** A plugin for reveal.js adding a chalkboard.
+ ** + ** Version: 0.3 + ** + ** License: MIT license (see LICENSE.md) + ** + ** Credits: + ** Chalkboard effect by Mohamed Moustafa https://github.com/mmoustafa/Chalkboard + ******************************************************************/ + +var RevealChalkboard = window.RevealChalkboard || (function(){ + var path = scriptPath(); + function scriptPath() { + // obtain plugin path from the script element + var path; + if (document.currentScript) { + path = document.currentScript.src.slice(0, -13); + } else { + var sel = document.querySelector('script[src$="/chalkboard.js"]') + if (sel) { + path = sel.src.slice(0, -13); + } + } + //console.log("Path: " + path); + return path; + } + + /***************************************************************** + ** Configuration + ******************************************************************/ + var config = Reveal.getConfig().chalkboard || {}; + + var background, pen, draw, color; + var theme = config.theme || "chalkboard"; + switch ( theme ) { + case "whiteboard": + background = [ 'rgba(127,127,127,.1)' , path + 'img/whiteboard.png' ]; + pen = [ path + 'img/boardmarker.png', path + 'img/boardmarker.png' ]; + draw = [ drawWithPen , drawWithPen ]; + color = [ 'rgba(0,0,255,1)', 'rgba(0,0,255,1)' ]; + break; + default: + background = [ 'rgba(127,127,127,.1)' , path + 'img/blackboard.png' ]; + pen = [ path + 'img/boardmarker.png', path + 'img/chalk.png' ]; + draw = [ drawWithPen , drawWithChalk ]; + color = [ 'rgba(0,0,255,1)', 'rgba(255,255,255,0.5)' ]; + } + + if ( config.background ) background = config.background; + if ( config.pen ) pen = config.pen; + if ( config.draw ) draw = config.draw; + if ( config.color ) color = config.color; + + var toggleChalkboardButton = config.toggleChalkboardButton == undefined ? true : config.toggleChalkboardButton; + var toggleNotesButton = config.toggleNotesButton == undefined ? true : config.toggleNotesButton; + var toggleEraserButton = config.toggleEraserButton == undefined ? 
true : config.toggleEraserButton; + var transition = config.transition || 800; + + var readOnly = config.readOnly; + + // MARIO + var eraseMode = false; + + + /***************************************************************** + ** Setup + ******************************************************************/ + + var eraserDiameter = 20; + + if ( toggleChalkboardButton ) + { + var buttonC = document.createElement( 'div' ); + buttonC.id = "toggle-chalkboard"; + buttonC.style.position = "absolute"; + buttonC.style.zIndex = 30; + buttonC.style.fontSize = "20px"; + buttonC.style.left = "10px"; + buttonC.style.bottom = "10px"; + buttonC.style.top = "auto"; + buttonC.style.right = "auto"; + buttonC.style.padding = "3px"; + buttonC.style.borderRadius = "3px"; + buttonC.style.color = "lightgrey"; + buttonC.innerHTML = ''; + document.querySelector(".reveal").appendChild( buttonC ); + } + + if ( toggleNotesButton ) + { + var buttonN = document.createElement( 'div' ); + buttonN.id = "toggle-notes"; + buttonN.style.position = "absolute"; + buttonN.style.zIndex = 30; + buttonN.style.fontSize = "20px"; + buttonN.style.left = "50px"; + buttonN.style.bottom = "10px"; + buttonN.style.top = "auto"; + buttonN.style.right = "auto"; + buttonN.style.padding = "3px"; + buttonN.style.borderRadius = "3px"; + buttonN.style.color = "lightgrey"; + buttonN.innerHTML = ''; + document.querySelector(".reveal").appendChild( buttonN ); + } + + if ( toggleEraserButton ) + { + var buttonE = document.createElement( 'div' ); + buttonE.id = "button-eraser"; + buttonE.style.position = "absolute"; + buttonE.style.zIndex = 30; + buttonE.style.fontSize = "20px"; + buttonE.style.left = "90px"; + buttonE.style.bottom = "10px"; + buttonE.style.top = "auto"; + buttonE.style.right = "auto"; + buttonE.style.padding = "3px"; + buttonE.style.borderRadius = "3px"; + buttonE.style.color = "lightgrey"; + buttonE.innerHTML = ''; + document.querySelector(".reveal").appendChild( buttonE ); + } + + var drawingCanvas = [ {id: "notescanvas" }, {id: "chalkboard" } ]; + setupDrawingCanvas(0); + setupDrawingCanvas(1); + + var mode = 0; // 0: notes canvas, 1: chalkboard + + var mouseX = 0; + var mouseY = 0; + var xLast = null; + var yLast = null; + + var slideStart = Date.now(); + var slideIndices = { h:0, v:0 }; + var event = null; + var timeouts = [ [], [] ]; + var slidechangeTimeout = null; + var playback = false; + + function setupDrawingCanvas( id ) { + var container = document.createElement( 'div' ); + container.id = drawingCanvas[id].id; + container.classList.add( 'overlay' ); + container.setAttribute( 'data-prevent-swipe', '' ); + container.oncontextmenu = function() { return false; } + container.style.cursor = 'url("' + pen[ id ] + '"), auto'; + + drawingCanvas[id].width = window.innerWidth; + drawingCanvas[id].height = window.innerHeight; + drawingCanvas[id].scale = 1; + drawingCanvas[id].xOffset = 0; + drawingCanvas[id].yOffset = 0; + + + if ( id == "0" ) { + container.style.background = 'rgba(0,0,0,0)'; + container.style.zIndex = "24"; + container.classList.add( 'visible' ) + container.style.pointerEvents = "none"; + + var slides = document.querySelector(".slides"); + var aspectRatio = Reveal.getConfig().width / Reveal.getConfig().height; + if ( drawingCanvas[id].width > drawingCanvas[id].height*aspectRatio ) { + drawingCanvas[id].xOffset = (drawingCanvas[id].width - drawingCanvas[id].height*aspectRatio) / 2; + } + else if ( drawingCanvas[id].height > drawingCanvas[id].width/aspectRatio ) { + drawingCanvas[id].yOffset = ( 
drawingCanvas[id].height - drawingCanvas[id].width/aspectRatio ) / 2; + } + } + else { + container.style.background = 'url("' + background[id] + '") repeat'; + container.style.zIndex = "26"; + } + + var sponge = document.createElement( 'img' ); + sponge.src = path + 'img/sponge.png'; + sponge.id = "sponge"; + sponge.style.visibility = "hidden"; + sponge.style.position = "absolute"; + container.appendChild( sponge ); + drawingCanvas[id].sponge = sponge; + + var canvas = document.createElement( 'canvas' ); + canvas.width = drawingCanvas[id].width; + canvas.height = drawingCanvas[id].height; + canvas.setAttribute( 'data-chalkboard', id ); + canvas.style.cursor = 'url("' + pen[ id ] + '"), auto'; + container.appendChild( canvas ); + drawingCanvas[id].canvas = canvas; + + drawingCanvas[id].context = canvas.getContext("2d"); + + + document.querySelector( '.reveal' ).appendChild( container ); + drawingCanvas[id].container = container; + } + + + /***************************************************************** + ** Storage + ******************************************************************/ + var storage = [ + { width: drawingCanvas[0].width - 2 * drawingCanvas[0].xOffset, height: drawingCanvas[0].height - 2 * drawingCanvas[0].yOffset, data: []}, + { width: drawingCanvas[1].width, height: drawingCanvas[1].height, data: []} + ]; + //console.log( JSON.stringify(storage)); + + if ( config.src != null ) { + loadData( config.src ); + } + + + /** + * Load data. + */ + function loadData( filename ) { + var xhr = new XMLHttpRequest(); + xhr.onload = function() { + if (xhr.readyState === 4) { + storage = JSON.parse(xhr.responseText); + for (var id = 0; id < storage.length; id++) { + if ( drawingCanvas[id].width != storage[id].width || drawingCanvas[id].height != storage[id].height ) { + drawingCanvas[id].scale = Math.min( drawingCanvas[id].width/storage[id].width, drawingCanvas[id].height/storage[id].height); + drawingCanvas[id].xOffset = (drawingCanvas[id].width - storage[id].width * drawingCanvas[id].scale)/2; + drawingCanvas[id].yOffset = (drawingCanvas[id].height - storage[id].height * drawingCanvas[id].scale)/2; + } + if ( config.readOnly ) { + drawingCanvas[id].container.style.cursor = 'default'; + drawingCanvas[id].canvas.style.cursor = 'default'; + } + } + } + else { + config.readOnly = undefined; + readOnly = undefined; + console.warn( 'Failed to get file ' + filename +". ReadyState: " + xhr.readyState + ", Status: " + xhr.status); + } + }; + + xhr.open( 'GET', filename, true ); + try { + xhr.send(); + } + catch ( error ) { + config.readOnly = undefined; + readOnly = undefined; + console.warn( 'Failed to get file ' + filename + '. Make sure that the presentation and the file are served by a HTTP server and the file can be found there. ' + error ); + } + } + + /** + * Download data. + */ + function downloadData() { + var a = document.createElement('a'); + document.body.appendChild(a); + try { + a.download = "chalkboard.json"; + var blob = new Blob( [ JSON.stringify( storage ) ], { type: "application/json"} ); + a.href = window.URL.createObjectURL( blob ); + } catch( error ) { + a.innerHTML += " (" + error + ")"; + } + a.click(); + document.body.removeChild(a); + } + + /** + * Returns data object for the slide with the given indices. 
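+	 * Storage layout (mirrors the push below):
+	 *   storage[id].data[i] = { slide: {h, v, f}, events: [...], duration: ms }
+	 * If no entry exists yet for the given indices, a fresh empty one is
+	 * pushed and returned.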
+ */ + function getSlideData( indices, id ) { + if ( id == undefined ) id = mode; + //console.log("ID: " + id + "/" + mode ); + if (!indices) indices = slideIndices; + for (var i = 0; i < storage[id].data.length; i++) { + if (storage[id].data[i].slide.h === indices.h && storage[id].data[i].slide.v === indices.v && storage[id].data[i].slide.f === indices.f ) { + return storage[id].data[i]; + } + } + //console.log(JSON.stringify(indices) + " push"); + + storage[id].data.push( { slide: indices, events: [], duration: 0 } ); + return storage[id].data[storage[id].data.length-1]; + } + + function hasSlideData( indices, id ) { + if ( id == undefined ) id = mode; + if (!indices) indices = slideIndices; + for (var i = 0; i < storage[id].data.length; i++) { + if (storage[id].data[i].slide.h === indices.h && storage[id].data[i].slide.v === indices.v && storage[id].data[i].slide.f === indices.f ) { + return storage[id].data[i].events.length > 0; + } + } + return false; + } + + /** + * Returns maximum duration of slide playback for both modes + */ + function getSlideDuration( indices ) { + if (!indices) indices = slideIndices; + var duration = 0; + for (var id = 0; id < 2; id++) { + for (var i = 0; i < storage[id].data.length; i++) { + if (storage[id].data[i].slide.h === indices.h && storage[id].data[i].slide.v === indices.v && storage[id].data[i].slide.f === indices.f ) { + duration = Math.max( duration, storage[id].data[i].duration ); + break; + } + } + } + //console.log( duration ); + return duration; + } + + /***************************************************************** + ** Print + ******************************************************************/ + var printMode = ( /print-pdf/gi ).test( window.location.search ); + + function createPrintout( ) { + + // MARIO: we want to print the drawings + //drawingCanvas[0].container.classList.remove( 'visible' ); // do not print notes canvas + + var patImg = new Image(); + patImg.onload = function () { + var nextSlide = []; + var width = Reveal.getConfig().width; + var height = Reveal.getConfig().height; + var scale = 1; + var xOffset = 0; + var yOffset = 0; + if ( width != storage[1].width || height != storage[1].height ) { + scale = Math.min( width/storage[1].width, height/storage[1].height); + xOffset = (width - storage[1].width * scale)/2; + yOffset = (height - storage[1].height * scale)/2; + } + + // collect next-slides for all slides with board stuff + for (var i = 0; i < storage[1].data.length; i++) { + var h = storage[1].data[i].slide.h; + var v = storage[1].data[i].slide.v; + var f = storage[1].data[i].slide.f; + var slide = f ? Reveal.getSlide(h,v,f) : Reveal.getSlide(h,v); + nextSlide.push( slide.nextSibling ); + } + + // go through board storage, paint image, insert slide + for (var i = 0; i < storage[1].data.length; i++) + { + var h = storage[1].data[i].slide.h; + var v = storage[1].data[i].slide.v; + var f = storage[1].data[i].slide.f; + var slide = f ? 
Reveal.getSlide(h,v,f) : Reveal.getSlide(h,v);
+
+			var slideData = getSlideData( storage[1].data[i].slide, 1 );
+
+			var imgCanvas = document.createElement('canvas');
+			imgCanvas.width = 2*width;
+			imgCanvas.height = 2*height;
+			// MARIO: not sure why we have to multiply by 2, but now it looks much better
+			// maybe a MacOS retina artifact
+
+			var imgCtx = imgCanvas.getContext("2d");
+			imgCtx.imageSmoothingEnabled = true;
+
+			// setup drawing context: for print, use black on white drawing
+			imgCtx.fillStyle = "white";
+			color[1] = "black";
+			imgCtx.rect(0,0,imgCanvas.width,imgCanvas.height);
+			imgCtx.fill();
+
+			for (var j = 0; j < slideData.events.length; j++) {
+				switch ( slideData.events[j].type ) {
+					case "draw":
+						for (var k = 1; k < slideData.events[j].curve.length; k++) {
+							draw[1]( imgCtx,
+								xOffset + slideData.events[j].curve[k-1].x*scale,
+								yOffset + slideData.events[j].curve[k-1].y*scale,
+								xOffset + slideData.events[j].curve[k].x*scale,
+								yOffset + slideData.events[j].curve[k].y*scale
+							);
+						}
+						break;
+					case "erase":
+						for (var k = 0; k < slideData.events[j].curve.length; k++) {
+							erase( imgCtx,
+								xOffset + slideData.events[j].curve[k].x*scale,
+								yOffset + slideData.events[j].curve[k].y*scale
+							);
+						}
+						break;
+					case "clear":
+						var newSlide = document.createElement( 'section' );
+						newSlide.classList.add( 'future' );
+						newSlide.innerHTML = '<h1 style="visibility:hidden">Drawing</h1>';
+						newSlide.setAttribute("data-background", 'url("' + imgCanvas.toDataURL("image/png") +'")' );
+						slide.parentElement.insertBefore( newSlide, nextSlide[i] );
+
+						var imgCanvas = document.createElement('canvas');
+						imgCanvas.width = width;
+						imgCanvas.height = height;
+						var imgCtx = imgCanvas.getContext("2d");
+						imgCtx.fillStyle = imgCtx.createPattern( patImg ,'repeat');
+						imgCtx.rect(0,0,imgCanvas.width,imgCanvas.height);
+						imgCtx.fill();
+						break;
+					default:
+						break;
+				}
+			}
+			var newSlide = document.createElement( 'section' );
+			newSlide.classList.add( 'future' );
+			newSlide.innerHTML = '<h1 style="visibility:hidden">Drawing</h1>
'; + newSlide.setAttribute("data-background", 'url("' + imgCanvas.toDataURL("image/png") +'")' ); + slide.parentElement.insertBefore( newSlide, nextSlide[i] ); + + } + Reveal.sync(); + }; + patImg.src = background[1]; + } + + + + /***************************************************************** + ** Drawings + ******************************************************************/ + + function drawWithPen(context,fromX,fromY,toX,toY){ + context.lineWidth = 2; + context.lineCap = 'round'; + context.strokeStyle = color[0]; + context.beginPath(); + context.moveTo(fromX, fromY); + context.lineTo(toX, toY); + context.stroke(); + } + + function drawWithChalk(context,fromX,fromY,toX,toY){ + var brushDiameter = 2; + context.lineWidth = brushDiameter; + context.lineCap = 'round'; + context.fillStyle = color[1]; // 'rgba(255,255,255,0.5)'; + context.strokeStyle = color[1]; + context.beginPath(); + context.moveTo(fromX, fromY); + context.lineTo(toX, toY); + context.stroke(); + } + + function erase(context,x,y){ + context.save(); + context.beginPath(); + context.arc(x, y, eraserDiameter, 0, 2 * Math.PI, false); + context.clip(); + context.clearRect(x - eraserDiameter - 1, y - eraserDiameter - 1, eraserDiameter * 2 + 2, eraserDiameter * 2 + 2); + context.restore(); + + } + + + /** + * Opens an overlay for the chalkboard. + */ + function showChalkboard() { + drawingCanvas[0].sponge.style.visibility = "hidden"; // make sure that the sponge from touch events is hidden + drawingCanvas[1].sponge.style.visibility = "hidden"; // make sure that the sponge from touch events is hidden + drawingCanvas[1].container.classList.add( 'visible' ); + mode = 1; + } + + + /** + * Closes open chalkboard. + */ + function closeChalkboard() { + drawingCanvas[0].sponge.style.visibility = "hidden"; // make sure that the sponge from touch events is hidden + drawingCanvas[1].sponge.style.visibility = "hidden"; // make sure that the sponge from touch events is hidden + drawingCanvas[1].container.classList.remove( 'visible' ); + xLast = null; + yLast = null; + event = null; + mode = 0; + } + + /** + * Clear current canvas. 
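+	 * @param id 0 for the notes canvas, 1 for the chalkboard (the same ids
+	 *           index the drawingCanvas array above); clearing the notes
+	 *           canvas also cancels a pending slide-change playback.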
+ */ + function clearCanvas( id ) { + if ( id == 0 ) clearTimeout( slidechangeTimeout ); + drawingCanvas[id].context.clearRect(0,0,drawingCanvas[id].width,drawingCanvas[id].height); + } + + /***************************************************************** + ** Playback + ******************************************************************/ + + document.addEventListener('seekplayback', function( event ) { + //console.log('event seekplayback ' + event.timestamp); + stopPlayback(); + if ( !playback || event.timestamp == 0) { + // in other cases startplayback fires after seeked + startPlayback( event.timestamp ); + } + //console.log('seeked'); + }); + + + document.addEventListener('startplayback', function( event ) { + //console.log('event startplayback ' + event.timestamp); + stopPlayback(); + playback = true; + startPlayback( event.timestamp ); + }); + + document.addEventListener('stopplayback', function( event ) { + //console.log('event stopplayback ' + (Date.now() - slideStart) ); + playback = false; + stopPlayback(); + }); + + function recordEvent( event ) { + var slideData = getSlideData(); + var i = slideData.events.length; + while ( i > 0 && event.begin < slideData.events[i-1].begin ) { + i--; + } + slideData.events.splice( i, 0, event); + slideData.duration = Math.max( slideData.duration, Date.now() - slideStart ) + 1; + } + + function startPlayback( timestamp, finalMode ) { + updateReadOnlyMode(); + + //console.log("playback " + timestamp ); + slideStart = Date.now() - timestamp; + closeChalkboard(); + mode = 0; + for ( var id = 0; id < 2; id++ ) { + clearCanvas( id ); + + /* MARIO: don't just call getSlideData, since it pushed slide data when nothing is found + which somehow inserts black slides for printing */ + if (hasSlideData( slideIndices, id )) + { + var slideData = getSlideData( slideIndices, id ); + var index = 0; + while ( index < slideData.events.length && slideData.events[index].begin < (Date.now() - slideStart) ) { + playEvent( id, slideData.events[index], timestamp ); + index++; + } + + while ( playback && index < slideData.events.length ) { + timeouts[id].push( setTimeout( playEvent, slideData.events[index].begin - (Date.now() - slideStart), id, slideData.events[index], timestamp ) ); + index++; + } + } + } + + if ( finalMode != undefined ) { + mode = finalMode; + } + if( mode == 1 ) showChalkboard(); + }; + + + function stopPlayback() { + //console.log("stopPlayback"); + //console.log("Timeouts: " + timeouts[0].length + "/"+ timeouts[1].length); + for ( var id = 0; id < 2; id++ ) { + for (var i = 0; i < timeouts[id].length; i++) { + clearTimeout(timeouts[id][i]); + } + timeouts[id] = []; + } + }; + + function playEvent( id, event, timestamp ) { + //console.log( timestamp +" / " + JSON.stringify(event)); + //console.log( id + ": " + timestamp +" / " + event.begin +" / " + event.type +" / " + mode ); + switch ( event.type ) { + case "open": + if ( timestamp <= event.begin ) { + showChalkboard(); + } + else { + mode = 1; + } + + break; + case "close": + if ( timestamp < event.begin ) { + closeChalkboard(); + } + else { + mode = 0; + } + break; + case "clear": + clearCanvas( id ); + break; + case "draw": + drawCurve( id, event, timestamp ); + break; + case "erase": + eraseCurve( id, event, timestamp ); + break; + + } + }; + + function drawCurve( id, event, timestamp ) { + var ctx = drawingCanvas[id].context; + var scale = drawingCanvas[id].scale; + var xOffset = drawingCanvas[id].xOffset; + var yOffset = drawingCanvas[id].yOffset; + + if ( event.curve.length > 1 ) + { + 
var stepDuration = ( event.end - event.begin )/ ( event.curve.length - 1 );
+
+			for (var i = 1; i < event.curve.length; i++) {
+				if (event.begin + i * stepDuration <= (Date.now() - slideStart)) {
+					draw[id](ctx,
+						xOffset + event.curve[i-1].x*scale,
+						yOffset + event.curve[i-1].y*scale,
+						xOffset + event.curve[i].x*scale,
+						yOffset + event.curve[i].y*scale);
+				}
+				else if ( playback ) {
+					// schedule on the per-canvas timeout list so that
+					// stopPlayback() can cancel it again
+					timeouts[id].push( setTimeout(
+						draw[id], Math.max(0,event.begin + i * stepDuration - (Date.now() - slideStart)), ctx,
+						xOffset + event.curve[i-1].x*scale,
+						yOffset + event.curve[i-1].y*scale,
+						xOffset + event.curve[i].x*scale,
+						yOffset + event.curve[i].y*scale
+					) );
+				}
+			}
+		}
+		else
+		{
+			// we need to record and play single points (for math equations)
+			var x = xOffset + event.curve[0].x*scale;
+			var y = yOffset + event.curve[0].y*scale;
+			draw[id](ctx, x, y, x, y);
+		}
+	};
+
+	function eraseCurve( id, event, timestamp ) {
+		if ( event.curve.length > 1 ) {
+			var ctx = drawingCanvas[id].context;
+			var scale = drawingCanvas[id].scale;
+			var xOffset = drawingCanvas[id].xOffset;
+			var yOffset = drawingCanvas[id].yOffset;
+
+			var stepDuration = ( event.end - event.begin )/ event.curve.length;
+			for (var i = 0; i < event.curve.length; i++) {
+				if (event.begin + i * stepDuration <= (Date.now() - slideStart)) {
+					erase(ctx, xOffset + event.curve[i].x*scale, yOffset + event.curve[i].y*scale);
+				}
+				else if ( playback ) {
+					timeouts[id].push( setTimeout(
+						erase, Math.max(0,event.begin + i * stepDuration - (Date.now() - slideStart)), ctx,
+						xOffset + event.curve[i].x * scale,
+						yOffset + event.curve[i].y * scale
+					) );
+				}
+			}
+		}
+	};
+
+	/*****************************************************************
+	 ** User interface
+	 ******************************************************************/
+
+	// TODO: check all touchevents
+	document.addEventListener('touchstart', function(evt) {
+		if ( !readOnly && evt.target.getAttribute('data-chalkboard') == mode ) {
+			var ctx = drawingCanvas[mode].context;
+			var scale = drawingCanvas[mode].scale;
+			var xOffset = drawingCanvas[mode].xOffset;
+			var yOffset = drawingCanvas[mode].yOffset;
+
+			evt.preventDefault();
+			var touch = evt.touches[0];
+			mouseX = touch.pageX;
+			mouseY = touch.pageY;
+			xLast = mouseX;
+			yLast = mouseY;
+
+			if (eraseMode)
+			{
+				drawingCanvas[mode].sponge.style.visibility = "visible";
+				event = { type: "erase", begin: Date.now() - slideStart, end: null, curve: [{x: (mouseX - xOffset)/scale, y: (mouseY-yOffset)/scale}] };
+			}
+			else
+			{
+				event = { type: "draw", begin: Date.now() - slideStart, end: null, curve: [{x: (mouseX - xOffset)/scale, y: (mouseY-yOffset)/scale}] };
+			}
+		}
+	}, false);
+
+
+	document.addEventListener('touchmove', function(evt) {
+		if ( event ) {
+			var ctx = drawingCanvas[mode].context;
+			var scale = drawingCanvas[mode].scale;
+			var xOffset = drawingCanvas[mode].xOffset;
+			var yOffset = drawingCanvas[mode].yOffset;
+
+			var touch = evt.touches[0];
+
+			// finger touch has force == 0 -> ignore
+			// pencil touch has force != 0 -> process
+			if (touch.force != 0)
+			{
+				mouseX = touch.pageX;
+				mouseY = touch.pageY;
+
+				if ((mouseX-xLast)*(mouseX-xLast) + (mouseY-yLast)*(mouseY-yLast) > 4.0)
+				{
+					if (mouseY < drawingCanvas[mode].height && mouseX < drawingCanvas[mode].width)
+					{
+						evt.preventDefault();
+						event.curve.push({x: (mouseX - xOffset)/scale, y: (mouseY-yOffset)/scale});
+
+						if ( event.type == "erase" )
+						{
+							drawingCanvas[mode].sponge.style.left = (mouseX - eraserDiameter) +"px" ;
+							drawingCanvas[mode].sponge.style.top =
(mouseY - eraserDiameter) +"px" ; + erase(ctx, mouseX, mouseY); + } + else + { + draw[mode](ctx, xLast, yLast, mouseX, mouseY); + } + + xLast = mouseX; + yLast = mouseY; + } + } + } + } + }, false); + + + document.addEventListener('touchend', function(evt) { + if ( event ) { + // hide sponge image + drawingCanvas[mode].sponge.style.visibility = "hidden"; + event.end = Date.now() - slideStart; + if ( event.type == "erase" || event.curve.length > 1 ) { + // do not save a line with a single point only + recordEvent( event ); + } + event = null; + } + }, false); + + + document.addEventListener( 'mousedown', function( evt ) { + if ( !readOnly && evt.target.getAttribute('data-chalkboard') == mode ) + { + var ctx = drawingCanvas[mode].context; + var scale = drawingCanvas[mode].scale; + var xOffset = drawingCanvas[mode].xOffset; + var yOffset = drawingCanvas[mode].yOffset; + + mouseX = evt.pageX; + mouseY = evt.pageY; + xLast = mouseX; + yLast = mouseY; + + if ( evt.button == 2 || eraseMode ) { + event = { type: "erase", begin: Date.now() - slideStart, end: null, curve: [{x: (mouseX - xOffset)/scale, y: (mouseY-yOffset)/scale}]}; + drawingCanvas[mode].canvas.style.cursor = 'url("' + path + 'img/sponge.png") ' + eraserDiameter + ' ' + eraserDiameter + ', auto'; + erase(ctx,mouseX,mouseY); + } + else { + event = { type: "draw", begin: Date.now() - slideStart, end: null, curve: [{x: (mouseX - xOffset)/scale, y: (mouseY-yOffset)/scale}] }; + drawingCanvas[mode].canvas.style.cursor = 'url("' + pen[mode] + '"), auto'; + draw[mode](ctx, xLast, yLast, mouseX,mouseY); + } + } + }, true ); + + + document.addEventListener( 'mousemove', function( evt ) { + if ( event ) + { + mouseX = evt.pageX; + mouseY = evt.pageY; + + if ((mouseX-xLast)*(mouseX-xLast) + (mouseY-yLast)*(mouseY-yLast) > 4.0) + { + var ctx = drawingCanvas[mode].context; + var scale = drawingCanvas[mode].scale; + var xOffset = drawingCanvas[mode].xOffset; + var yOffset = drawingCanvas[mode].yOffset; + + event.curve.push({x: (mouseX - xOffset)/scale, y: (mouseY-yOffset)/scale}); + if(mouseY < drawingCanvas[mode].height && mouseX < drawingCanvas[mode].width) { + if ( event.type == "erase" ) { + erase(ctx,mouseX,mouseY); + } + else { + draw[mode](ctx, xLast, yLast, mouseX,mouseY); + } + xLast = mouseX; + yLast = mouseY; + } + } + } + }); + + + document.addEventListener( 'mouseup', function( evt ) { + if ( event ) { + if(evt.button == 2){ + drawingCanvas[mode].canvas.style.cursor = 'url("' + pen[mode] + '"), auto'; + } + event.end = Date.now() - slideStart; + + // we need to record and play single points (for math equations) + //if ( event.type == "erase" || event.curve.length > 1 ) + { + recordEvent( event ); + } + event = null; + } + } ); + + + window.addEventListener( "resize", function() { + //console.log("resize"); + //console.log(Reveal.getScale()); + // Resize the canvas and draw everything again + var timestamp = Date.now() - slideStart; + if ( !playback ) { + timestamp = getSlideDuration(); + } + + //console.log( drawingCanvas[0].scale + "/" + drawingCanvas[0].xOffset + "/" +drawingCanvas[0].yOffset ); + for (var id = 0; id < 2; id++ ) { + drawingCanvas[id].width = window.innerWidth; + drawingCanvas[id].height = window.innerHeight; + drawingCanvas[id].canvas.width = drawingCanvas[id].width; + drawingCanvas[id].canvas.height = drawingCanvas[id].height; + drawingCanvas[id].context.canvas.width = drawingCanvas[id].width; + drawingCanvas[id].context.canvas.height = drawingCanvas[id].height; + + drawingCanvas[id].scale = Math.min( 
drawingCanvas[id].width/storage[id].width, drawingCanvas[id].height/storage[id].height ); + drawingCanvas[id].xOffset = (drawingCanvas[id].width - storage[id].width * drawingCanvas[id].scale)/2; + drawingCanvas[id].yOffset = (drawingCanvas[id].height - storage[id].height * drawingCanvas[id].scale)/2; + //console.log( drawingCanvas[id].scale + "/" + drawingCanvas[id].xOffset + "/" +drawingCanvas[id].yOffset ); + } + //console.log( window.innerWidth + "/" + window.innerHeight); + startPlayback( timestamp, mode ); + + } ); + + function updateReadOnlyMode() { + if ( config.readOnly == undefined ) { + readOnly = ( getSlideDuration() > 0 ); + if ( readOnly ) { + drawingCanvas[0].container.style.cursor = 'default'; + drawingCanvas[1].container.style.cursor = 'default'; + drawingCanvas[0].canvas.style.cursor = 'default'; + drawingCanvas[1].canvas.style.cursor = 'default'; + if ( notescanvas.style.pointerEvents != "none" ) { + event = null; + notescanvas.style.background = 'rgba(0,0,0,0)'; + notescanvas.style.pointerEvents = "none"; + } + + } + else { + drawingCanvas[0].container.style.cursor = 'url("' + pen[0] + '"), auto';; + drawingCanvas[1].container.style.cursor = 'url("' + pen[1] + '"), auto';; + drawingCanvas[0].canvas.style.cursor = 'url("' + pen[0] + '"), auto';; + drawingCanvas[1].canvas.style.cursor = 'url("' + pen[1] + '"), auto';; + } + } + } + + Reveal.addEventListener( 'ready', function( evt ) { + //console.log('ready'); + if ( !printMode ) { + slideStart = Date.now(); + slideIndices = Reveal.getIndices(); + if ( !playback ) { + startPlayback( getSlideDuration(), 0 ); + } + if ( Reveal.isAutoSliding() ) { + var event = new CustomEvent('startplayback'); + event.timestamp = 0; + document.dispatchEvent( event ); + } + updateReadOnlyMode(); + } + else { + createPrintout(); + } + }); + + + Reveal.addEventListener( 'slidechanged', function( evt ) { + if ( !printMode ) { + slideStart = Date.now(); + slideIndices = Reveal.getIndices(); + closeChalkboard(); + clearCanvas( 0 ); + clearCanvas( 1 ); + if ( !playback ) { + // MARIO: show stuff immediately + //slidechangeTimeout = setTimeout( startPlayback, transition, getSlideDuration(), 0 ); + slidechangeTimeout = setTimeout( startPlayback, 50, getSlideDuration(), 0 ); + } + if ( Reveal.isAutoSliding() ) { + var event = new CustomEvent('startplayback'); + event.timestamp = 0; + document.dispatchEvent( event ); + } + + updateReadOnlyMode(); + } + }); + + + Reveal.addEventListener( 'fragmentshown', function( evt ) { + if ( !printMode ) { + slideStart = Date.now(); + slideIndices = Reveal.getIndices(); + closeChalkboard(); + clearCanvas( 0 ); + clearCanvas( 1 ); + if ( Reveal.isAutoSliding() ) { + var event = new CustomEvent('startplayback'); + event.timestamp = 0; + document.dispatchEvent( event ); + } + else if ( !playback ) { + startPlayback( getSlideDuration(), 0 ); + } + updateReadOnlyMode(); + } + }); + + + Reveal.addEventListener( 'fragmenthidden', function( evt ) { + if ( !printMode ) { + slideStart = Date.now(); + slideIndices = Reveal.getIndices(); + closeChalkboard(); + clearCanvas( 0 ); + clearCanvas( 1 ); + if ( Reveal.isAutoSliding() ) { + document.dispatchEvent( new CustomEvent('stopplayback') ); + } + else if ( !playback ) { + startPlayback( getSlideDuration() ); + closeChalkboard(); + } + updateReadOnlyMode(); + } + }); + + + Reveal.addEventListener( 'autoslideresumed', function( evt ) { + var event = new CustomEvent('startplayback'); + event.timestamp = 0; + document.dispatchEvent( event ); + }); + Reveal.addEventListener( 
'autoslidepaused', function( evt ) { + document.dispatchEvent( new CustomEvent('stopplayback') ); + startPlayback( getSlideDuration(), 0 ); + }); + + + + // check whether slide has blackboard scribbles, and then highlight icon + function updateIcon() + { + if ( !printMode ) + { + var idx = Reveal.getIndices(); + if (hasSlideData(idx, 1)) + { + buttonC.style.color = "red"; + buttonC.style.fontSize = "28px"; + buttonC.style.left = "6px"; + buttonC.style.bottom = "6px"; + } + else + { + buttonC.style.color = "lightgrey"; + buttonC.style.fontSize = "20px"; + buttonC.style.left = "10px"; + buttonC.style.bottom = "10px"; + } + } + } + Reveal.addEventListener( 'slidechanged', updateIcon ); + Reveal.addEventListener( 'fragmentshown', updateIcon ); + Reveal.addEventListener( 'fragmenthidden', updateIcon ); + + + + function toggleSponge() + { + if ( !readOnly ) + { + if (eraseMode) + { + eraseMode = false; + drawingCanvas[mode].canvas.style.cursor = 'url("' + pen[mode] + '"), auto'; + buttonE.style.color = "lightgrey"; + } + else + { + eraseMode = true; + drawingCanvas[mode].canvas.style.cursor = 'url("' + path + 'img/sponge.png") ' + eraserDiameter + ' ' + eraserDiameter + ', auto'; + buttonE.style.color = "#2a9ddf"; + } + } + } + + + function toggleNotesCanvas() { + if ( !readOnly ) { + if ( mode == 1 ) { + toggleChalkboard(); + notescanvas.style.background = background[0]; //'rgba(255,0,0,0.5)'; + notescanvas.style.pointerEvents = "auto"; + } + else { + if ( notescanvas.style.pointerEvents != "none" ) { + event = null; + notescanvas.style.background = 'rgba(0,0,0,0)'; + notescanvas.style.pointerEvents = "none"; + buttonN.style.color = "lightgrey"; + } + else { + notescanvas.style.background = background[0]; //'rgba(255,0,0,0.5)'; + notescanvas.style.pointerEvents = "auto"; + buttonN.style.color = "#2a9ddf"; + } + } + } + }; + + function toggleChalkboard() { + //console.log("toggleChalkboard " + mode); + if ( mode == 1 ) { + event = null; + // MARIO if ( !readOnly ) recordEvent( { type:"close", begin: Date.now() - slideStart } ); + closeChalkboard(); + buttonC.style.color = "lightgrey"; + } + else { + showChalkboard(); + // MARIO if ( !readOnly ) recordEvent( { type:"open", begin: Date.now() - slideStart } ); + buttonC.style.color = "#2a9ddf"; + } + }; + + function clear() { + if ( !readOnly ) { + recordEvent( { type:"clear", begin: Date.now() - slideStart } ); + clearCanvas( mode ); + } + }; + + function resetSlide( force ) { + var ok = force || confirm("Please confirm to delete chalkboard drawings on this slide!"); + if ( ok ) { + stopPlayback(); + slideStart = Date.now(); + event = null; + closeChalkboard(); + + clearCanvas( 0 ); + clearCanvas( 1 ); + + mode = 1; + var slideData = getSlideData(); + slideData.duration = 0; + slideData.events = []; + mode = 0; + var slideData = getSlideData(); + slideData.duration = 0; + slideData.events = []; + + updateReadOnlyMode(); + } + }; + + function resetStorage( force ) { + var ok = force || confirm("Please confirm to delete all chalkboard drawings!"); + if ( ok ) { + stopPlayback(); + slideStart = Date.now(); + clearCanvas( 0 ); + clearCanvas( 1 ); + if ( mode == 1 ) { + event = null; + closeChalkboard(); + } + storage = [ + { width: drawingCanvas[0].width - 2 * drawingCanvas[0].xOffset, height: drawingCanvas[0].height - 2 * drawingCanvas[0].yOffset, data: []}, + { width: drawingCanvas[1].width, height: drawingCanvas[1].height, data: []} + ]; + + updateReadOnlyMode(); + } + }; + + this.drawWithPen = drawWithPen; + this.drawWithChalk = drawWithChalk; + 
this.toggleNotesCanvas = toggleNotesCanvas; + this.toggleChalkboard = toggleChalkboard; + this.clear = clear; + this.reset = resetSlide; + this.resetAll = resetStorage; + this.download = downloadData; + this.toggleSponge = toggleSponge; + + return this; +})(); + diff --git a/presentation/template/my-chalkboard/img/blackboard.png b/presentation/template/my-chalkboard/img/blackboard.png new file mode 100755 index 0000000..50a2f64 Binary files /dev/null and b/presentation/template/my-chalkboard/img/blackboard.png differ diff --git a/presentation/template/my-chalkboard/img/boardmarker.png b/presentation/template/my-chalkboard/img/boardmarker.png new file mode 100755 index 0000000..aa9c115 Binary files /dev/null and b/presentation/template/my-chalkboard/img/boardmarker.png differ diff --git a/presentation/template/my-chalkboard/img/chalk.png b/presentation/template/my-chalkboard/img/chalk.png new file mode 100755 index 0000000..71779ef Binary files /dev/null and b/presentation/template/my-chalkboard/img/chalk.png differ diff --git a/presentation/template/my-chalkboard/img/sponge-bak.png b/presentation/template/my-chalkboard/img/sponge-bak.png new file mode 100755 index 0000000..cbfb269 Binary files /dev/null and b/presentation/template/my-chalkboard/img/sponge-bak.png differ diff --git a/presentation/template/my-chalkboard/img/sponge.png b/presentation/template/my-chalkboard/img/sponge.png new file mode 100755 index 0000000..b26b072 Binary files /dev/null and b/presentation/template/my-chalkboard/img/sponge.png differ diff --git a/presentation/template/my-chalkboard/img/whiteboard.png b/presentation/template/my-chalkboard/img/whiteboard.png new file mode 100755 index 0000000..dbf570a Binary files /dev/null and b/presentation/template/my-chalkboard/img/whiteboard.png differ diff --git a/presentation/template/my-zoom/zoom.js b/presentation/template/my-zoom/zoom.js new file mode 100755 index 0000000..6b1eb62 --- /dev/null +++ b/presentation/template/my-zoom/zoom.js @@ -0,0 +1,155 @@ +// +// Based on zoom.js plugin of reveal.js +// modified to correctly handle reveal's scaling +// to react on dblclick +// to remove (unused) panning code +// + +/*! + * zoom.js 0.3 (modified for use with reveal.js) + * http://lab.hakim.se/zoom-js + * MIT licensed + * + * Copyright (C) 2011-2014 Hakim El Hattab, http://hakim.se + */ + + + +var RevealZoom = window.RevealZoom || (function(){ + + + // The current zoom level (scale) + var level = 1; + + + // The easing that will be applied when we zoom in/out + document.body.style.transition = 'transform 0.8s ease'; + document.body.style.OTransition = '-o-transform 0.8s ease'; + document.body.style.msTransition = '-ms-transform 0.8s ease'; + document.body.style.MozTransition = '-moz-transform 0.8s ease'; + document.body.style.WebkitTransition = '-webkit-transform 0.8s ease'; + + + /** + * Applies the CSS required to zoom in, prefers the use of CSS3 + * transforms but falls back on zoom for IE. 
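+ * A scale of 1 resets the transform again (see zoomOut below); any other
+ * value scales the whole document body around rect, which is expected in
+ * page coordinates.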
+ * + * @param {Object} rect + * @param {Number} scale + */ + function magnify( rect, scale ) { + + // Ensure a width/height is set + rect.width = rect.width || 1; + rect.height = rect.height || 1; + + // Center the rect within the zoomed viewport + rect.x -= ( window.innerWidth - ( rect.width * scale ) ) / 2; + rect.y -= ( window.innerHeight - ( rect.height * scale ) ) / 2; + + // Reset + if( scale === 1 ) + { + document.body.style.transform = ''; + document.body.style.OTransform = ''; + document.body.style.msTransform = ''; + document.body.style.MozTransform = ''; + document.body.style.WebkitTransform = ''; + } + + // Scale + else + { + var origin = '0px 0px'; + var transform = 'translate('+ -rect.x +'px,'+ -rect.y +'px) scale('+ scale +')'; + + document.body.style.transformOrigin = origin; + document.body.style.OTransformOrigin = origin; + document.body.style.msTransformOrigin = origin; + document.body.style.MozTransformOrigin = origin; + document.body.style.WebkitTransformOrigin = origin; + + document.body.style.transform = transform; + document.body.style.OTransform = transform; + document.body.style.msTransform = transform; + document.body.style.MozTransform = transform; + document.body.style.WebkitTransform = transform; + } + + level = scale; + } + + + // zoom to element on double click + document.querySelector( '.reveal .slides' ).addEventListener( 'dblclick', function( event ) { + event.preventDefault(); + zoomTo(event.target); + }); + + + // un-show answers on slide-change + Reveal.addEventListener( 'slidechanged', function() { + if( level !== 1 ) + { + zoomOut(); + } + }); + + + /** + * Zooms in on an HTML element. + * + * @param element: HTML element to zoom in on + */ + function zoomTo( element ) + { + // Due to an implementation limitation we can't zoom in + // to another element without zooming out first + if( level !== 1 ) + { + zoomOut(); + } + else + { + // Space around the zoomed in element to leave on screen + var padding = 20; + var bounds = element.getBoundingClientRect(); + + // are slides zoomed up, and is this done using CSS zoom? + // then incorporate this zoom! + var zoom = document.querySelector( '.reveal .slides' ).style.zoom; + var scale = (zoom < 1) ? 1 : zoom; + + var options = { + x: Math.round( bounds.left * scale - padding ), + y: Math.round( bounds.top * scale - padding ), + width: Math.round( bounds.width * scale + ( padding * 2 ) ), + height: Math.round( bounds.height * scale + ( padding * 2 ) ) + }; + + + options.scale = Math.max( Math.min( window.innerWidth / options.width, window.innerHeight / options.height ), 1 ); + + + if( options.scale > 1 ) + { + options.x *= options.scale; + options.y *= options.scale; + magnify( options, options.scale ); + } + } + } + + + // zoom out to normal scale + function zoomOut() + { + magnify( { x: 0, y: 0 }, 1 ); + level = 1; + } + + + return this; + +})(); + diff --git a/presentation/template/revealjs b/presentation/template/revealjs new file mode 160000 index 0000000..9d0a3d7 --- /dev/null +++ b/presentation/template/revealjs @@ -0,0 +1 @@ +Subproject commit 9d0a3d7d5ef46f1841fdd4e8f0397b9c311f50b0 diff --git a/presentation/template/template.html b/presentation/template/template.html new file mode 100755 index 0000000..a0999d7 --- /dev/null +++ b/presentation/template/template.html @@ -0,0 +1,208 @@ + + + + + + + + $if(title)$ + + $title$$if(subtitle)$: $subtitle$ $endif$ + + $endif$ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+<div class="reveal">
+
+<div class="slides">
+
+
+$if(title)$
+<section id="title-slide">
+  <div class="title">$title$</div>
+  $if(subtitle)$
+  <div class="subtitle">$subtitle$</div>
+  $endif$
+  $if(author)$
+  <div class="author">$author$</div>
+  $if(affiliation)$
+  <div class="affiliation">$affiliation$</div>
+  $endif$
+  $endif$
+</section>
+$endif$
+
+
+$if(toc)$
+<section id="toc">
+  <h1>Outline</h1>
+</section>
+$endif$
+
+
+$body$
+
+
+</div>
+</div>
+
+</body>
+</html>