diff --git a/EIVPackage/EIVGeneral/coverage_plotting.py b/EIVPackage/EIVGeneral/coverage_plotting.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad944b557d6223f89add11e6dd2d9fcb25008f1a
--- /dev/null
+++ b/EIVPackage/EIVGeneral/coverage_plotting.py
@@ -0,0 +1,180 @@
+"""
+Get the numerical vs the theoretical coverage for different coverage factors
+and plot them. This module contains
+- The function `get_coverages`, that returns two numpy arrays containing
+  the numerical and theoretical coverage.
+- The function `get_coverage_distribution`, that runs through several
+  networks and collects the results of `get_coverages`.
+- Plotting of these results is done separately (see
+  `Experiments/plot_coverage.py`).
+"""
+import importlib
+import os
+import argparse
+import json
+
+import numpy as np
+import torch
+import torch.backends.cudnn
+from torch.utils.data import DataLoader
+from tqdm import tqdm
+
+from EIVArchitectures import Networks
+from EIVTrainingRoutines import train_and_store
+from EIVGeneral.coverage_metrics import epistemic_coverage, normalized_std
+from EIVData.repeated_sampling import repeated_sampling
+
+import matplotlib.pyplot as plt
+
+def get_coverages(not_averaged_predictions, y,\
+        q_range=np.linspace(0.1,0.9,num=30)):
+    """
+    Compute the numerical and theoretical coverage for various coverage
+    factors, computed from the `q` in `q_range`, using
+    `EIVGeneral.coverage_metrics.epistemic_coverage` 
+    :param not_averaged_predictions: A tuple of tensors as in the output of
+    `FNNEIV.predict` with `take_average_of_prediction` set to `False`, i.e.:
+    the predictions of the neural net not averaged over the first dimension 
+    (the repetition dimension in `FNNEIV.predict`) and
+    the aleatoric uncertainty with a batch dimension and a feature dimension. 
+    :param y: A `torch.tensor` of the same shape as the second component
+    of `not_averaged_predictions`. If the feature dimension is missing, it is added.
+    :param q_range: An iterator through floats between 0 and 1.
+    :returns: numerical_coverage, theoretical_coverage (numpy arrays)
+    """
+    numerical_coverage_collection = []
+    theoretical_coverage_collection = []
+    for q in q_range:
+        numerical_coverage, theoretical_coverage =\
+                epistemic_coverage(\
+                not_averaged_predictions=not_averaged_predictions,
+                y=y, q=q)
+        numerical_coverage_collection.append(float(numerical_coverage))
+        theoretical_coverage_collection.append(float(theoretical_coverage))
+    return np.array(numerical_coverage_collection),\
+            np.array(theoretical_coverage_collection)
+    
+
+def get_coverage_distribution(net_iterator, dataloader_iterator,
+        device, number_of_draws, q_range=np.linspace(0.1,0.9,num=30),
+        number_of_test_samples=10, stack=True):
+    """
+    Returns the numerical and theoretical coverage for all nets in
+    `net_iterator` with dataloader from `dataloader_iterator`
+    :param net_iterator: Iterator yielding `torch.nn.Module`
+    :param dataloader_iterator: Iterator yielding `torch.utils.data.DataLoader`.
+    Should return the same number of elements as `net_iterator`.
+    :param device: The `torch.device` to be used.
+    :param number_of_draws: Integer or list, to be used as keyword argument
+    for all networks returned by `net_iterator`.
+    :param q_range: Range of floats between 0 and 1, as in 
+    `EIVGeneral.coverage_plotting.get_coverages`.
+    :param number_of_test_samples: An integer, number of samples from the
+    dataloaders to be used. NOTE(review): currently unused in the body.
+    :param stack: Boolean. If True (default) the results will be stacked along 
+    the last dimension. If False a list will be returned.
+    :returns: num_coverage_collection, th_coverage_collection
+    """
+    num_coverage_collection, th_coverage_collection = [],[]
+    for net, dataloader in\
+     zip(net_iterator, dataloader_iterator):
+                not_av_pred_collection_out, not_av_pred_collection_sigma,\
+                    y_collection = [], [], []
+                for i, (x,y) in enumerate(dataloader):
+                    x, y = x.to(device), y.to(device)
+                    not_averaged_predictions = net.predict(x,
+                            take_average_of_prediction=False,
+                            number_of_draws=number_of_draws)
+                    not_av_pred_collection_out.append(
+                            not_averaged_predictions[0])
+                    not_av_pred_collection_sigma.append(
+                            not_averaged_predictions[1])
+                    y_collection.append(y)
+                not_av_pred_collection = [
+                        torch.concat(not_av_pred_collection_out, dim=0), 
+                        torch.concat(not_av_pred_collection_sigma, dim=0)]
+                y_collection = torch.concat(y_collection, dim=0)
+                numerical_coverage, theoretical_coverage = get_coverages(
+                        not_averaged_predictions=not_av_pred_collection,
+                        y=y_collection)
+                num_coverage_collection.append(numerical_coverage)
+                th_coverage_collection.append(theoretical_coverage)
+    if stack:
+        num_coverage_collection = np.stack(num_coverage_collection, axis=-1)
+        th_coverage_collection = np.stack(th_coverage_collection, axis=-1)
+    return num_coverage_collection, th_coverage_collection
+
+                
+
+
+
+
+
+#######
+
+# data = 'linear'
+# # load hyperparameters from JSON file
+# with open(os.path.join('/home/martin09/san/Projects/journal_eiv/Experiments/configurations',f'eiv_{data}.json'),'r') as conf_file:
+#     eiv_conf_dict = json.load(conf_file)
+# with open(os.path.join('/home/martin09/san/Projects/journal_eiv/Experiments/configurations',f'noneiv_{data}.json'),'r') as conf_file:
+#     noneiv_conf_dict = json.load(conf_file)
+# seed = 0
+# long_dataname = eiv_conf_dict["long_dataname"]
+# short_dataname = eiv_conf_dict["short_dataname"]
+# load_data = importlib.import_module(f'EIVData.{long_dataname}').load_data
+# train, test = load_data()
+# test_dataloader = DataLoader(test, batch_size=1000)
+# device = torch.device('cuda:0')
+# x,y = next(iter(test_dataloader))
+# if len(y.shape) <= 1:
+#     y = y.view((-1,1))
+# if len(x.shape) <= 1:
+#     x = x.view((-1,1))
+# x,y = x.to(device), y.to(device)
+# input_dim = x.shape[1]
+# output_dim = y.shape[1]
+# init_std_y = noneiv_conf_dict["init_std_y_list"][0]
+# unscaled_reg = noneiv_conf_dict["unscaled_reg"]
+# p = noneiv_conf_dict["p"]
+# hidden_layers = noneiv_conf_dict["hidden_layers"]
+# saved_file = os.path.join('saved_networks',
+#             f'noneiv_{short_dataname}'\
+#                     f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
+#                     f'_p_{p:.2f}_seed_{seed}.pkl')
+# noneiv_net = Networks.FNNBer(p=p, init_std_y=init_std_y,
+#         h=[input_dim, *hidden_layers, output_dim]).to(device)
+# train_and_store.open_stored_training(saved_file=saved_file,
+#         net=noneiv_net, device=device)
+# # EiV
+# init_std_y = eiv_conf_dict["init_std_y_list"][0]
+# unscaled_reg = eiv_conf_dict["unscaled_reg"]
+# p = eiv_conf_dict["p"]
+# hidden_layers = eiv_conf_dict["hidden_layers"]
+# fixed_std_x = eiv_conf_dict["fixed_std_x"]
+# saved_file = os.path.join('saved_networks',
+#         f'eiv_{short_dataname}'\
+#                 f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
+#                 f'_p_{p:.2f}_fixed_std_x_{fixed_std_x:.3f}'\
+#                 f'_seed_{seed}.pkl')
+# eiv_net = Networks.FNNEIV(p=p, init_std_y=init_std_y,
+#         h=[input_dim, *hidden_layers, output_dim],
+#         fixed_std_x=fixed_std_x).to(device)
+# def eiv_net_iterator(seed_range=range(0,10)):
+
+# train_and_store.open_stored_training(saved_file=saved_file,
+#         net=eiv_net, device=device)
+
+# noneiv_not_averaged_predictions = noneiv_net.predict(x,\
+#         number_of_draws=100, 
+#         take_average_of_prediction=False)
+# noneiv_num_cov, noneiv_th_cov = get_coverages(noneiv_not_averaged_predictions, y,\
+#         q_range=np.linspace(0.01,0.99,num=30))
+# eiv_not_averaged_predictions = eiv_net.predict(x,\
+#         number_of_draws=[100,5], 
+#         take_average_of_prediction=False)
+# eiv_num_cov, eiv_th_cov = get_coverages(eiv_not_averaged_predictions, y,\
+#         q_range=np.linspace(0.01,0.99,num=30))
+# lin_x = np.linspace(np.min(noneiv_th_cov), np.max(noneiv_th_cov))
+# plt.plot(lin_x, lin_x)
+# plt.plot(noneiv_th_cov, noneiv_num_cov)
+# plt.plot(eiv_th_cov, eiv_num_cov)
diff --git a/EIVPackage/EIVGeneral/manipulate_datasets.py b/EIVPackage/EIVGeneral/manipulate_datasets.py
new file mode 100644
index 0000000000000000000000000000000000000000..f04280e3a84f374cef1705a3e53a6607a6d0e394
--- /dev/null
+++ b/EIVPackage/EIVGeneral/manipulate_datasets.py
@@ -0,0 +1,21 @@
+from operator import itemgetter
+from torch.utils.data import Dataset
+
+
+class VerticalCut(Dataset):
+    """
+    Only include certain elements of the dataset `dataset`, namely those with
+    index in `components_to_pick`.
+    :param dataset: A torch.utils.data.Dataset
+    :param components_to_pick: List, defaults to [0,].
+    """
+    def __init__(self, dataset, components_to_pick=[0,]):
+        super(VerticalCut, self).__init__()
+        self.dataset = dataset
+        self.components_to_pick = components_to_pick
+
+    def __getitem__(self, i):
+        return itemgetter(*self.components_to_pick)(self.dataset.__getitem__(i))
+
+    def __len__(self):
+        return len(self.dataset)
diff --git a/Experiments/create_tabular.py b/Experiments/create_tabular.py
index 728b6e5228a3e1849210ff5a4552d4a35873de65..8b8373de4c5735c380cde7b9079b39e224a0af53 100644
--- a/Experiments/create_tabular.py
+++ b/Experiments/create_tabular.py
@@ -1,9 +1,8 @@
 import os
 import glob
-import argparse
 import json
 
-metrics_to_display = ['rmse','logdens','bias','true_coverage_numerical','avg_bias']
+metrics_to_display = ['rmse','coverage_theory','coverage_numerical','true_coverage_numerical','avg_bias']
 show_incomplete = True
 
 list_of_result_files = glob.glob(os.path.join('results','*.json'))
diff --git a/Experiments/evaluate_metrics.py b/Experiments/evaluate_metrics.py
index 22d4e66495a8e87cbc6230c825d728629cb53ae2..bf052e872d2e42d830e827526e52649da4d67be4 100644
--- a/Experiments/evaluate_metrics.py
+++ b/Experiments/evaluate_metrics.py
@@ -20,7 +20,7 @@ from EIVData.repeated_sampling import repeated_sampling
 
 # read in data via --data option
 parser = argparse.ArgumentParser()
-parser.add_argument("--data", help="Loads data", default='replin')
+parser.add_argument("--data", help="Loads data", default='linear')
 parser.add_argument("--no-autoindent", help="",
         action="store_true") # to avoid conflics in IPython
 args = parser.parse_args()
@@ -285,7 +285,8 @@ def collect_full_seed_range_metrics(load_data,
                 hidden_layers = noneiv_conf_dict["hidden_layers"]
                 saved_file = os.path.join('saved_networks',
                             f'noneiv_{short_dataname}'\
-                                    f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
+                                    f'_init_std_y_{init_std_y:.3f}'\
+                                    f'_ureg_{unscaled_reg:.1f}'\
                                     f'_p_{p:.2f}_seed_{seed}.pkl')
                 net = Networks.FNNBer(p=p, init_std_y=init_std_y,
                         h=[input_dim, *hidden_layers, output_dim]).to(device)
@@ -455,13 +456,7 @@ seed_list = range(noneiv_conf_dict["seed_range"][0],
 
 
 
-
-
-
-
-
-
-max_batch_number = 2
+number_of_test_samples = 2
 for seed in tqdm(seed_list):
     try:
         train_data, test_data, true_train_data, true_test_data \
@@ -478,7 +473,7 @@ for seed in tqdm(seed_list):
                 batch_size=int(np.min((len(true_test_data), 800))), shuffle=True)
     for i in tqdm(range(num_test_epochs)):
         for j, x_y_pairs in enumerate(test_dataloader):
-            if j > max_batch_number:
+            if j > number_of_test_samples:
                 break
             # fill in ground truth with None, if not existent
             if true_test_data is None:
diff --git a/Experiments/plot_coverage.py b/Experiments/plot_coverage.py
new file mode 100644
index 0000000000000000000000000000000000000000..81b3f146815a07d66aebb93f0ccbb0721bdc500f
--- /dev/null
+++ b/Experiments/plot_coverage.py
@@ -0,0 +1,168 @@
+"""
+Compute metrics for datasets for which there is not necessarily a ground truth.
+Results will be stored in the results folder
+"""
+import importlib
+import os
+import argparse
+import json
+
+import numpy as np
+import torch
+import torch.backends.cudnn
+from torch.utils.data import DataLoader
+from tqdm import tqdm
+import matplotlib.pyplot as plt
+
+from EIVArchitectures import Networks
+from EIVTrainingRoutines import train_and_store
+from EIVGeneral.coverage_plotting import get_coverage_distribution
+from EIVGeneral.manipulate_datasets import VerticalCut
+
+
+# read in data via --data option
+parser = argparse.ArgumentParser()
+parser.add_argument("--data", help="Loads data", default='naval')
+parser.add_argument("--no-autoindent", help="",
+        action="store_true") # to avoid conflics in IPython
+args = parser.parse_args()
+data = args.data
+
+# load hyperparameters from JSON file
+with open(os.path.join('configurations',f'eiv_{data}.json'),'r') as conf_file:
+    eiv_conf_dict = json.load(conf_file)
+with open(os.path.join('configurations',f'noneiv_{data}.json'),'r') as conf_file:
+    noneiv_conf_dict = json.load(conf_file)
+
+long_dataname = eiv_conf_dict["long_dataname"]
+short_dataname = eiv_conf_dict["short_dataname"]
+
+print(f"Plotting coverage for {long_dataname}")
+
+scale_outputs = False 
+load_data = importlib.import_module(f'EIVData.{long_dataname}').load_data
+
+try:
+    gpu_number = eiv_conf_dict["gpu_number"]
+    device = torch.device(f'cuda:{gpu_number}')
+    try:
+        torch.tensor([0.0]).to(device)
+    except RuntimeError:
+        if torch.cuda.is_available():
+            print('Switched to GPU 0')
+            device = torch.device('cuda:0')
+        else:
+            print('No cuda available, using CPU')
+            device = torch.device('cpu')
+except KeyError:
+    device = torch.device('cpu')
+
+
+# test whether there is a ground truth
+try:
+    train_data, test_data, true_train_data, true_test_data \
+            = load_data(seed=0, return_ground_truth=True)
+    ground_truth_exists = True
+except TypeError:
+    train_data, test_data = load_data(seed=0)
+    true_train_data, true_test_data = None, None
+    ground_truth_exists = False
+
+train_data, test_data = load_data()
+input_dim = train_data[0][0].numel()
+output_dim = train_data[0][1].numel()
+
+## Create iterators
+seed_list = range(noneiv_conf_dict["seed_range"][0],
+        noneiv_conf_dict["seed_range"][1])
+
+# networks
+def net_iterator(eiv=True, seed_list=seed_list):
+    if eiv:
+        init_std_y = eiv_conf_dict["init_std_y_list"][0]
+        unscaled_reg = eiv_conf_dict["unscaled_reg"]
+        p = eiv_conf_dict["p"]
+        hidden_layers = eiv_conf_dict["hidden_layers"]
+        fixed_std_x = eiv_conf_dict["fixed_std_x"]
+        net = Networks.FNNEIV(p=p, init_std_y=init_std_y,
+                h=[input_dim, *hidden_layers, output_dim],
+                fixed_std_x=fixed_std_x).to(device)
+        for seed in seed_list:
+            saved_file = os.path.join('saved_networks',
+                    f'eiv_{short_dataname}'\
+                            f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
+                            f'_p_{p:.2f}_fixed_std_x_{fixed_std_x:.3f}'\
+                            f'_seed_{seed}.pkl')
+            train_and_store.open_stored_training(saved_file=saved_file,
+                    net=net, device=device)
+            yield net
+    else:
+        init_std_y = noneiv_conf_dict["init_std_y_list"][0]
+        unscaled_reg = noneiv_conf_dict["unscaled_reg"]
+        p = noneiv_conf_dict["p"]
+        hidden_layers = noneiv_conf_dict["hidden_layers"]
+        net = Networks.FNNBer(p=p, init_std_y=init_std_y,
+                h=[input_dim, *hidden_layers, output_dim]).to(device)
+        for seed in seed_list:
+            saved_file = os.path.join('saved_networks',
+                        f'noneiv_{short_dataname}'\
+                                f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
+                                f'_p_{p:.2f}_seed_{seed}.pkl')
+            train_and_store.open_stored_training(saved_file=saved_file,
+                    net=net, device=device)
+            yield net
+
+# dataloaders
+def dataloader_iterator(seed_list=seed_list, use_ground_truth=False,
+        batch_size = 1000):
+    for seed in seed_list:
+        if not use_ground_truth:
+            train_data, test_data = load_data(seed=seed)
+            test_dataloader = DataLoader(test_data, 
+                    batch_size=batch_size,
+                    shuffle=True)
+            yield test_dataloader
+        else:
+            assert ground_truth_exists
+            _, _, _, true_test =\
+                    load_data(seed=seed, return_ground_truth=True)
+            # take noisy x but unnoisy y
+            cut_true_test = VerticalCut(true_test, components_to_pick=[2,1])
+            test_dataloader = DataLoader(cut_true_test, 
+                    batch_size=batch_size,
+                    shuffle=True)
+            yield test_dataloader
+
+
+
+
+eiv_numerical_coverage, eiv_theoretical_coverage = get_coverage_distribution(
+        net_iterator=net_iterator(eiv=True),
+        dataloader_iterator=dataloader_iterator(),
+        device=device,
+        number_of_draws=[100,5])
+mean_eiv_theoretical_coverage = np.mean(eiv_theoretical_coverage, axis=1)
+std_eiv_theoretical_coverage = np.std(eiv_theoretical_coverage, axis=1)
+mean_eiv_numerical_coverage = np.mean(eiv_numerical_coverage, axis=1)
+std_eiv_numerical_coverage = np.std(eiv_numerical_coverage, axis=1)
+noneiv_numerical_coverage, noneiv_theoretical_coverage = get_coverage_distribution(
+        net_iterator=net_iterator(eiv=False),
+        dataloader_iterator=dataloader_iterator(),
+        device=device,
+        number_of_draws=100)
+mean_noneiv_theoretical_coverage = np.mean(noneiv_theoretical_coverage, axis=1)
+std_noneiv_theoretical_coverage = np.std(noneiv_theoretical_coverage, axis=1)
+mean_noneiv_numerical_coverage = np.mean(noneiv_numerical_coverage, axis=1)
+std_noneiv_numerical_coverage = np.std(noneiv_numerical_coverage, axis=1)
+plt.plot(mean_eiv_theoretical_coverage, mean_eiv_numerical_coverage, color='r', label='EiV')
+plt.fill_between(mean_eiv_theoretical_coverage, mean_eiv_numerical_coverage
+        - std_eiv_numerical_coverage,
+        mean_eiv_numerical_coverage + std_eiv_numerical_coverage, color='r', alpha=0.5)
+plt.plot(mean_noneiv_theoretical_coverage, mean_noneiv_numerical_coverage, color='b', label='nonEiV')
+plt.fill_between(mean_noneiv_theoretical_coverage, mean_noneiv_numerical_coverage
+        - std_noneiv_numerical_coverage,
+        mean_noneiv_numerical_coverage + std_noneiv_numerical_coverage, color='b', alpha=0.5)
+diag_x = np.linspace(0, np.max(mean_eiv_numerical_coverage))
+plt.plot(diag_x, diag_x, 'k--')
+plt.show()
+