diff --git a/EIVPackage/EIVArchitectures/Networks.py b/EIVPackage/EIVArchitectures/Networks.py
index 09d75b877ba2617af15447e9bacd22b8581de35d..a8f12008853729aaffdb24e13077ac96f0487420 100644
--- a/EIVPackage/EIVArchitectures/Networks.py
+++ b/EIVPackage/EIVArchitectures/Networks.py
@@ -249,12 +249,13 @@ class FNNEIV(nn.Module):
                 number_of_parameter_chunks=number_of_parameter_chunks,
                 remove_graph=remove_graph,
                 take_average_of_prediction=False)
-        # Add "repetition" dimension to y and out
+        # Add "repetition" dimension to y and sigmas
         y = y[:,None,...]
         sigmas = sigmas[:,None,...]
+        # add an output axis if necessary
         if len(y.shape) <= 2:
-            # add an output axis if necessary
             y = y[...,None]
+        if len(sigmas.shape) <= 2:
             sigmas = sigmas[...,None]
         # squeeze last dimensions into one
         y = y.view((*y.shape[:2], -1))
@@ -285,6 +286,39 @@ class FNNEIV(nn.Module):
         else:
             return predictive_log_density_values
 
+    def predict_mean_and_unc(self, x, number_of_draws=[100,5],
+            number_of_parameter_chunks=None, remove_graph=True):
+        """
+        Take the mean and standard deviation over `number_of_draws` forward
+        passes and return them together with the predicted sigmas.
+        **Note**: This method touches neither the input noise nor Dropout;
+        the corresponding settings are left to the user!
+        :param x: A torch.tensor, the input
+        :param number_of_draws: An integer or a list. If an integer, it will
+        be converted internally to `[number_of_draws,1]`. The two elements
+        give the numbers of draws to obtain from x via parameter sampling
+        (first element) and noise input sampling (second element).
+        :param number_of_parameter_chunks: An integer or None (default). If
+        None, the second element of `number_of_draws` will be taken (and will
+        thus be identical to 1 if `number_of_draws` is an integer, see above).
+        Samples in the parameter space will be divided into
+        `number_of_parameter_chunks` chunks when collected. Can be used to
+        reduce the memory usage.
+        :param remove_graph: If True (default) the output will be detached
+        to save memory.
+        :return: mean, std, sigmas
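+
+        A minimal usage sketch (`net` and `x` are hypothetical names for an
+        `FNNEIV` instance and an input batch):
+
+        >>> mean, std, sigmas = net.predict_mean_and_unc(x,
+        ...         number_of_draws=[100, 5])
+
+        Here `std` quantifies the spread of the sampled predictions around
+        `mean` and `sigmas` the predicted aleatoric uncertainty.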
+        """
+        out, sigmas = self.predict(x=x,
+                number_of_draws=number_of_draws,
+                number_of_parameter_chunks=number_of_parameter_chunks,
+                remove_graph=remove_graph,
+                take_average_of_prediction=False)
+        mean = torch.mean(out, dim=1)
+        std = torch.std(out, dim=1)
+        return mean, std, sigmas
+
 
 class FNNBer(nn.Module):
     """
diff --git a/EIVPackage/EIVGeneral/coverage_metrices.py b/EIVPackage/EIVGeneral/coverage_metrices.py
new file mode 100644
index 0000000000000000000000000000000000000000..71876483b07af9b3175f8e957130422e618dc49b
--- /dev/null
+++ b/EIVPackage/EIVGeneral/coverage_metrices.py
@@ -0,0 +1,81 @@
+"""
+Computes the coverage of observations by predictions and uncertainty and
+similar quantities. This module contains two main functions:
+- epistemic_coverage: Computes the coverage of observations by predictions and
+  their epistemic uncertainty, together with the averaged theoretical ground
+  truth. If `normalize_errors` is set to True, residuals are normalized and a
+  fixed interval length is considered.
+- normalized_std: Computes the standard deviation of the normalized residuals.
+"""
+import numpy as np
+import scipy.stats
+import torch
+
+def logical_and_along_dimension(x, dim=-1, keepdim=False):
+    """
+    For a boolean tensor `x` perform a logical AND along the axis `dim`.
+    If `keepdim` is True, the axis `dim` will be kept (defaults to False).
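+
+    A minimal sketch of the intended behavior:
+
+    >>> x = torch.tensor([[True, False], [True, True]])
+    >>> logical_and_along_dimension(x, dim=1)
+    tensor([False,  True])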
+    """
+    return torch.prod(x.to(torch.bool), dim=dim, keepdim=keepdim).to(torch.bool)
+
+
+def multivariate_interval_length(dim, q=0.95):
+    """
+    Returns the half side length of a multivariate cube, symmetrically
+    centered around 0, such that its measure under a standard normal
+    distribution equals `q`.
+    :param dim: A positive integer, the dimension.
+    :param q: Float, should be between 0 and 1. Defaults to 0.95.
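+
+    For independent standard normal components, the cube [-c,c]^dim has
+    probability (2*Phi(c)-1)**dim; setting this equal to `q` and solving
+    for the univariate quantile gives Phi(c) = 0.5 * (1+q**(1/dim)).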
+    """
+    # use independence of components to reduce to a univariate quantile
+    univariate_quantile = 0.5 * (1+q**(1/dim))
+    return scipy.stats.norm.ppf(univariate_quantile)
+
+
+def epistemic_coverage(mean_unc, y, q=0.95, normalize_errors=False):
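+    """
+    Returns the numerical and theoretical coverage of the observations `y`
+    by the predictions `mean` and the epistemic uncertainty `epis_unc`,
+    where `mean_unc = (mean, epis_unc, aleat_unc)`.
+    :param mean_unc: A triple of tensors of identical shape: the predictions,
+    their epistemic uncertainty and their aleatoric uncertainty.
+    :param y: A torch.tensor, the observations.
+    :param q: Float between 0 and 1, the aimed-for coverage. Defaults to 0.95.
+    :param normalize_errors: If True, residuals are normalized by the total
+    uncertainty and a fixed interval length is used, so that the theoretical
+    coverage equals `q`. Defaults to False.
+    :return: numerical_coverage, theoretical_coverage
+    """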
+    mean, epis_unc, aleat_unc = mean_unc
+    assert epis_unc.shape == aleat_unc.shape
+    assert mean.shape == epis_unc.shape
+    # Add feature dimension to y if missing
+    if len(y.shape) <= 1:
+        y = y.view((-1,1))
+    assert y.shape == mean.shape
+    # interval length: fixed if errors are normalized, else based on epis_unc
+    interval_length = multivariate_interval_length(dim=y.shape[1], q=q)
+    if not normalize_errors:
+        interval_length = interval_length * epis_unc
+    total_unc = torch.sqrt(epis_unc**2 + aleat_unc**2)
+    # numerical computation
+    errors = mean - y
+    if normalize_errors:
+        assert errors.shape == total_unc.shape
+        errors /= total_unc
+    check_if_in_interval = logical_and_along_dimension(
+            torch.abs(errors) <= interval_length, dim=1)
+    numerical_coverage = torch.mean(
+            check_if_in_interval.to(torch.float32)
+            ).cpu().detach().item()
+    # theoretical computation
+    if not normalize_errors:
+        cdf_args = (interval_length/total_unc).detach().cpu().numpy()
+        cdf_values = scipy.stats.norm.cdf(cdf_args)
+    prob_values = 2*cdf_values - 1
+        assert len(cdf_values.shape) == 2
+        theoretical_coverage = np.mean(np.prod(prob_values, axis=1)).item()
+    else:
+        theoretical_coverage = q
+    return numerical_coverage, theoretical_coverage
+
+
+def normalized_std(mean_unc, y):
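+    """
+    Returns the standard deviation, averaged over the feature dimension, of
+    the residuals `mean - y` normalized by the total uncertainty. For
+    well-calibrated uncertainties this value should be close to 1.
+    :param mean_unc: A triple of tensors `(mean, epis_unc, aleat_unc)`, as in
+    `epistemic_coverage`.
+    :param y: A torch.tensor, the observations.
+    """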
+    mean, epis_unc, aleat_unc = mean_unc
+    assert epis_unc.shape == aleat_unc.shape
+    assert mean.shape == epis_unc.shape
+    # Add feature dimension to y if missing
+    if len(y.shape) <= 1:
+        y = y.view((-1,1))
+    assert y.shape == mean.shape
+    total_unc = torch.sqrt(epis_unc**2 + aleat_unc**2)
+    # numerical computation
+    errors = mean - y
+    assert errors.shape == total_unc.shape
+    errors /= total_unc
+    return torch.mean(torch.std(errors, dim=0)).item()
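+
+
+if __name__ == '__main__':
+    # A minimal, self-contained sketch of how these metrics can be used. The
+    # synthetic data below is purely illustrative and not part of the EIV
+    # pipeline: for calibrated uncertainties the numerical and theoretical
+    # coverage should roughly agree and the normalized std should be close
+    # to 1.
+    torch.manual_seed(0)
+    mean = torch.randn(1000, 1)
+    epis_unc = 0.1 * torch.ones(1000, 1)
+    aleat_unc = 0.5 * torch.ones(1000, 1)
+    y = mean + torch.sqrt(epis_unc**2 + aleat_unc**2) * torch.randn(1000, 1)
+    mean_unc = (mean, epis_unc, aleat_unc)
+    num_cov, th_cov = epistemic_coverage(mean_unc, y)
+    print('coverage: %.3f (theoretical: %.3f)' % (num_cov, th_cov))
+    print('normalized std: %.3f' % normalized_std(mean_unc, y))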