Commit 9f6e964a authored by Nando Farchmin

Update slides

parent a984a237
slides/img/acknowledgements.png (453 KiB)
slides/img/wikipedia_uq.png (158 KiB)
slides/img/xkcd_error_bars.png (11.5 KiB)

@@ -84,42 +84,56 @@
\begin{document}
\maketitle
\begin{frame}[fragile, shrink=0]{Table of Contents}
\begin{frame}[fragile, shrink=-50]{Table of Contents}
\vspace{1cm}
\tableofcontents
\end{frame}
% -----------------------------------------------------------------------------
\section{Inverse Problems}
\subsection{What are inverse problems?}
\section{Introduction}
\begin{frame}[fragile, shrink=0]{What are inverse problems?}
\phantom{a}
{\bf Examples:}
% \pause
\pause
\begin{center}
\includegraphics[width=4cm]{img/daredevil.jpg}
\includegraphics[width=4cm]{img/magneto.png}
\includegraphics[width=4cm]{img/antman.png}
\includegraphics[width=4cm]{img/magneto.png}
\end{center}
% \pause
\pause
\begin{alertblock}{Inverse Problem}
Given a forward model $f\colon \mathbb{R}^M \to \mathbb{R}^J$ and a measurement $\delta_{\mathrm{meas}}\in\mathbb{R}^J$, find parameters $y^*\in\mathbb{R}^M$ such that $f(y^*) = \delta_{\mathrm{meas}}$.
\end{alertblock}
\end{frame}
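% A minimal Python sketch of the definition above: recover $y^*$ from a
% measurement by nonlinear least squares. The forward model, grid and values
% here are illustrative stand-ins, not code from these slides.
%   import numpy as np
%   from scipy.optimize import least_squares
%
%   x = np.linspace(0.0, 1.0, 20)              # measurement positions, J = 20
%
%   def f(y):                                  # toy forward model R^1 -> R^J
%       return np.sin(y[0] * x)
%
%   delta_meas = f(np.array([2.5]))            # synthetic data with y* = 2.5
%   res = least_squares(lambda y: f(y) - delta_meas, x0=np.array([1.0]))
%   print(res.x)                               # recovers y* ~ 2.5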
\subsection{What is uncertainty quantification?}
\begin{frame}[fragile, shrink=0]{What is Uncertainty Quantification?}
\pause
\begin{minipage}{0.48\textwidth}
\includegraphics[width=\textwidth]{img/wikipedia_uq.png}
\end{minipage}
\hfill
\pause
\begin{minipage}{0.48\textwidth}
\includegraphics[width=\textwidth]{img/xkcd_error_bars.png}
\end{minipage}
\end{frame}
\subsection{The Bayesian framework}
\begin{frame}[fragile, shrink=0]{Problems}
\begin{itemize}[label=\color{ptb}{\textbullet}]
\item slow forward models
\item forward model not invertible
\item non-unique solutions
\item how to estimate uncertainties?
\end{itemize}
\end{frame}
% -----------------------------------------------------------------------------
\section{Surrogates for Time-Intense Functions}
\section{Function Approximation}
\begin{frame}[fragile, shrink=0]{Overview}
\begin{alertblock}{Question}
If we have a function $f\colon\mathbb{R}^M\to\mathbb{R}^J$ for which computing $f(y)$ for some $y\in\Gamma\subset\mathbb{R}^M$ takes a long time, can we construct a function $u(y) \approx f(y)$ for all $y\in\Gamma$ that is fast to evaluate?
\end{alertblock}
% \pause
\pause
\begin{minipage}{0.38\textwidth}
{\bf Examples:}
\begin{itemize}[label=\color{ptb}{\textbullet}]
@@ -141,11 +155,11 @@
\begin{alertblock}{Notation -- Part 1}
Let $\Gamma_m\subset\mathbb{R}$, let $\pi_m$ be a probability measure on $\Gamma_m$, and let $\{ P_j^m \}_{j\in\mathbb{N}_0}$ be an orthonormal basis of $L^2(\Gamma_m,\pi_m)$.
\end{alertblock}
% \pause
\pause
\begin{alertblock}{Notation -- Part 2}
Define $\Gamma=\Gamma_1\times\dots\times\Gamma_M$, $\pi(y) = \prod_{m=1}^M \pi_m(y_m)$ and $\{ P_\mu=\prod_{m=1}^M P_{\mu_m}^m \}_{\mu\in\mathbb{N}_0^M}$, which is an orthonormal basis of $L^2(\Gamma,\pi)$.
\end{alertblock}
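% A short numpy sketch of this construction for the special case of the
% uniform measure on $[-1,1]^M$: the orthonormal basis is
% $\tilde P_j = \sqrt{2j+1}\,P_j$ (Legendre), and $P_\mu$ is the tensor
% product over the components. Names are illustrative, not from this repo.
%   import numpy as np
%   from numpy.polynomial.legendre import Legendre
%
%   def P1(j, t):  # orthonormal w.r.t. pi_m(dt) = dt/2 on [-1, 1]
%       return np.sqrt(2 * j + 1) * Legendre.basis(j)(t)
%
%   def P_mu(mu, y):  # tensor-product basis function P_mu(y)
%       return np.prod([P1(j, y[m]) for m, j in enumerate(mu)])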
% \pause
\pause
\begin{alertblock}{Definition: Polynomial Chaos}
If $f\in L^2(\Gamma,\pi;\mathbb{R}^J)$, then
\begin{equation*}
@@ -160,7 +174,7 @@
\begin{alertblock}{In Practice}
Choose a finite $\Lambda\subset\mathbb{N}_0^M$ and use $f(y) \approx u(y) = \sum_{\mu\in\Lambda} \discretized{u}[\mu]P_\mu(y)$.
\end{alertblock}
% \pause
\pause
\begin{minipage}{0.36\textwidth}
\includegraphics[width=5cm]{img/multiindex_sets.png}
\end{minipage}
@@ -172,7 +186,7 @@
\item sparse grids
\item really sparse sets
\end{itemize}
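% A common concrete choice is the total-degree set
% $\Lambda = \{\mu : \mu_1+\dots+\mu_M \le d\}$; a minimal sketch
% (illustrative helper, not from this repository):
%   from itertools import product
%
%   def total_degree_set(M, d):
%       return [mu for mu in product(range(d + 1), repeat=M) if sum(mu) <= d]
%
%   Lam = total_degree_set(M=3, d=4)   # 35 multi-indices = binomial(7, 3)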
% \pause
\pause
\begin{alertblock}{Properties}
We have $\mathbb{E}_\pi[f] = \discretized{f}[0]$ and $\operatorname{Var}[f] = \sum_{\mu\in\mathbb{N}_0^M}\discretized{f}[\mu]^2 - \discretized{f}[0]^2\approx \sum_{\mu\in\Lambda\setminus\{ 0 \}}\discretized{u}[\mu]^2 $.
\end{alertblock}
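% Given the coefficients, mean and variance are read off directly; a
% two-line numpy sketch (the coefficient values are made up):
%   import numpy as np
%   u_hat = np.array([1.7, 0.4, -0.2, 0.05])     # u_hat[0] <-> mu = 0
%   mean, var = u_hat[0], np.sum(u_hat[1:] ** 2)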
@@ -186,7 +200,7 @@
&= \operatorname*{arg\,min}_{\discretized{w}\in\mathbb{R}^{\vert\Lambda\vert}} \Vert f - \sum_{\mu\in\Lambda} \discretized{w}[\mu] P_\mu \Vert_{L^2(\Gamma,\pi;\mathbb{R}^J)}^2\\
&\approx \operatorname*{arg\,min}_{\discretized{w}\in\mathbb{R}^{\vert\Lambda\vert}} \frac{1}{N}\sum_{i=1}^{N} \lambda^{(i)}\Vert f(y^{(i)}) - \sum_{\mu\in\Lambda} \discretized{w}[\mu] P_\mu(y^{(i)}) \Vert_{2}^2.
\end{align*}
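% A minimal Monte Carlo least-squares fit of the coefficients (unit weights
% $\lambda^{(i)} = 1$, scalar model for brevity; P_mu, Lam as in the sketches
% above; all names are illustrative assumptions):
%   import numpy as np
%   rng = np.random.default_rng(0)
%   N  = 200
%   ys = rng.uniform(-1.0, 1.0, size=(N, 3))              # samples of pi
%   A  = np.array([[P_mu(mu, y) for mu in Lam] for y in ys])
%   F  = np.array([f(y) for y in ys])                     # expensive model
%   u_hat, *_ = np.linalg.lstsq(A, F, rcond=None)         # min ||A w - F||_2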
% \pause
\pause
Setting the derivative with respect to $\discretized{w}$ to zero leads to the linear system $\discretized{G}\discretized{u}=\discretized{F}$ with
\begin{minipage}{0.56\textwidth}
\begin{align*}
@@ -216,7 +230,7 @@
\operatorname{Sob}_{1,3,7} = \frac{\operatorname{Var}[S_{1,3,7}]}{\operatorname{Var}[f]}.
\end{equation*}
\end{alertblock}
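% From a PCE, the Sobol index of a subset S sums the squared coefficients of
% exactly those mu supported on S, divided by the variance; a sketch with
% Lam and u_hat as above (S uses 0-based component indices):
%   def sobol_index(S, Lam, u_hat):
%       S = set(S)
%       num = sum(c ** 2 for mu, c in zip(Lam, u_hat)
%                 if {m for m, j in enumerate(mu) if j > 0} == S)
%       var = sum(c ** 2 for mu, c in zip(Lam, u_hat) if any(mu))
%       return num / var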
% \pause
\pause
\begin{minipage}{0.48\textwidth}
\begin{alertblock}{PCE and Sobol Indices}
\begin{equation*}
@@ -236,17 +250,17 @@
\section*{Tutorial 2: Sensitivity Analysis}
\section{Bayesian Inverse Problem}
\section{Inverse Problems}
\begin{frame}[fragile, shrink=0]{Setting}
\begin{alertblock}{Inverse Problem}
Given a forward model $f\colon \mathbb{R}^M \to \mathbb{R}^J$ and a measurement $\delta_{\mathrm{meas}}\in\mathbb{R}^J$, find parameters $y^*\in\mathbb{R}^M$ such that $f(y^*) = \delta_{\mathrm{meas}}$.
\end{alertblock}
% \pause
\pause
\begin{alertblock}{Stochastic Regularization}
Find $y^*$ such that $\qquad\delta_{\mathrm{meas}} = f(y^*) + \varepsilon\qquad$ for $\varepsilon\sim\mathcal{N}(0, \Sigma)$.
\end{alertblock}
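% In code, everything downstream (MAP estimation, MCMC) only needs the
% unnormalized log-posterior; a sketch for the Gaussian noise model above,
% where f, delta_meas, Sigma and log_prior are assumed given:
%   import numpy as np
%   def log_post(y):
%       r = delta_meas - f(y)
%       return -0.5 * r @ np.linalg.solve(Sigma, r) + log_prior(y)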
% \pause
\pause
\begin{minipage}{0.58\textwidth}
\begin{alertblock}{Bayesian Inverse Problem}
The Bayesian posterior density is given by $\pi_{y|\delta} = Z^{-1}\,\pi_{\delta|y}\,\pi$ for
@@ -266,12 +280,12 @@
The surrogate is given by $f(\omega) = \bigl(\sin(\omega x_j)\bigr)_{j=1,\dots,J}\in\mathbb{R}^J$, but the measurements are $\delta_{\mathrm{meas}} = \bigl(2\sin(\omega x_j)+10\bigr)_{j=1,\dots,J}$.
Model the forward model error, e.g., as $\delta_{\mathrm{meas}} - (a f(y) + b)$ and let $a\sim\pi_a$, $b\sim\pi_b$.
\end{alertblock}
% \pause
\pause
\begin{alertblock}{Example: Measurement Model Error}
The likelihood requires specifying the noise $\varepsilon\sim \mathcal{N}(0,\Sigma)$, which might not be known exactly.
Model the measurement error, e.g., via $\Sigma(c) = c^2I$ and let $c\sim\pi_c$.
\end{alertblock}
% \pause
\pause
\begin{alertblock}{Extended Posterior}
Define $\tilde\pi_{y,a,b,c|\delta}\propto\tilde\pi_{\delta|y,a,b,c}\tilde\pi$ for $\tilde\pi = \pi_y\pi_a\pi_b\pi_c$ and
\begin{equation*}
@@ -286,7 +300,7 @@
Assume different measurements $\delta_{\mathrm{meas}}^{[k]}$ of one line grating (XRR, GIXRF, \dots) with correspondingly different forward models $f^{[k]}(y)$.
We can then combine the information to improve predictions and reduce uncertainties.
\end{alertblock}
% \pause
\pause
\begin{alertblock}{Combined Posterior}
The combined posterior is given by $\pi_{y|\delta^{[1]},\dots,\delta^{[K]}} \propto \prod_{k=1}^K \pi_{\delta^{[k]}|y} \pi_y$ for
\begin{equation*}
@@ -294,7 +308,7 @@
= \frac{1}{(2\pi)^{J/2}\sqrt{\det\Sigma^{[k]}}} e^{-\frac{1}{2}\Vert \delta_{\mathrm{meas}}^{[k]} - f^{[k]}(y)\Vert_{(\Sigma^{[k]})^{-1}}^2}
\end{equation*}
\end{alertblock}
% \pause
\pause
\begin{alertblock}{Note}
This means that the $k$-th posterior acts as the prior for posterior $k+1$.
\end{alertblock}
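% Numerically, combining the K measurement series just adds log-likelihoods,
% which realizes exactly this prior-to-posterior chaining; a sketch where
% models, deltas, Sigmas and log_prior are assumed given:
%   import numpy as np
%   def log_post_combined(y):
%       lp = log_prior(y)
%       for f_k, d_k, S_k in zip(models, deltas, Sigmas):
%           r = d_k - f_k(y)
%           lp += -0.5 * r @ np.linalg.solve(S_k, r)
%       return lp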
@@ -303,7 +317,7 @@
\begin{frame}[fragile, shrink=0]{Uncertainty Quantification}
\begin{minipage}{0.48\textwidth}
{\bf Methods for parameter estimation:}
% \pause
\pause
\begin{itemize}[label=\color{ptb}{\textbullet}]
\item educated guess
\item compute critical points via derivatives
@@ -312,7 +326,7 @@
\item \dots
\end{itemize}
\vspace{0.2cm}
% \pause
\pause
{\bf Methods for UQ:}
\begin{itemize}[label=\color{ptb}{\textbullet}]
\item {\bf \color{ptbblue}Markov-chain Monte Carlo (MCMC)}
@@ -322,7 +336,7 @@
\item \dots
\end{itemize}
\end{minipage}
% \pause
\pause
\begin{minipage}{0.48\textwidth}
\includegraphics[width=\textwidth]{img/random_walk.png}
\end{minipage}
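% A compact random-walk Metropolis sketch targeting an unnormalized
% log-posterior such as log_post above (y0 is a numpy array; step size and
% chain length are illustrative, not tuned values from these slides):
%   import numpy as np
%   def metropolis(log_post, y0, n_steps=10_000, step=0.1):
%       rng = np.random.default_rng(0)
%       y, lp, chain = y0, log_post(y0), [y0]
%       for _ in range(n_steps):
%           y_new = y + step * rng.standard_normal(y0.shape)
%           lp_new = log_post(y_new)
%           if np.log(rng.random()) < lp_new - lp:   # Metropolis accept step
%               y, lp = y_new, lp_new
%           chain.append(y)
%       return np.array(chain)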
@@ -379,4 +393,8 @@
\section*{Tutorial 3: MCMC}
\begin{frame}[fragile, shrink=0]{Acknowledgements}
\includegraphics[width=\textwidth]{img/acknowledgements.png}
\end{frame}
\end{document}