diff --git a/Experiments/plot_prediction.py b/Experiments/plot_prediction.py
index ef3891e7bdedb66564d3fd8bc790f2c72e2713f8..ab29feddb8d26242f83d690bcf25bf5207b78943 100644
--- a/Experiments/plot_prediction.py
+++ b/Experiments/plot_prediction.py
@@ -18,10 +18,21 @@ from EIVTrainingRoutines import train_and_store
 k = 1.96
 
 def compute_predictions_and_uncertainties(data, x_range, eiv, number_of_draws,
-        x_noise_seed = 0, plotting_seed = 0, number_of_test_datapoints = 100):
+        x_noise_seed = 0, y_noise_seed = 1,
+        plotting_seed = 0, number_of_test_datapoints = 100):
     """
     Iterate through networks associated with `data` (short dataname) 
-    throuth their JSON configuration and evaluate their prediction and uncertainties for the (true) x in `x_range`. In addition, the values of the noisified `x_range` and
+    through their JSON configuration and evaluate their prediction and
+    uncertainties for the (true) x in `x_range`. The results are returned as
+    numpy arrays in a `plotting_dictionary` that contains the predictions
+    and uncertainties via the keys "prediction" and "uncertainty" but also the
+    noisified version of `x_range` and the corresponding y values (key
+    "range_points") and `number_of_test_datapoints` points from the test
+    dataset with seed `plotting_seed` (key "test_data_points").
+
+    **Note**: The output of the neural networks is assumed to be
+    one-dimensional.
+
     :data: String, short dataname
     :x_range: An iterator yielding the (true) x values to consider
     :eiv: Boolean. If True an EiV model is used, else an non-EiV model.
@@ -29,11 +40,15 @@ def compute_predictions_and_uncertainties(data, x_range, eiv, number_of_draws,
     non-EiV models and a two-element list for EiV models.
     :x_noise_seed: An integer. Will be used as a seed to generate the noise put
     on `x_range`.
+    :y_noise_seed: An integer. Will be used as a seed to generate the noise put
+    on `func(x_range)` that will be returned with `range_points` in the
+    `plotting_dictionary`.
     :plotting_seed: An integer. Needed for choosing which of the test datasets
     will be used to returning the test datapoints.
     :number_of_test_datapoints: A positive integer. Number of test_datapoints
-    :returns: numerical_coverage, theoretical_coverage
+    :returns: plotting_dictionary
     """
+    plotting_dictionary = {}
     # load configuration file
     if eiv:
         with open(os.path.join('configurations',f'eiv_{data}.json'),'r') as\
@@ -51,6 +66,9 @@ def compute_predictions_and_uncertainties(data, x_range, eiv, number_of_draws,
     load_data = importlib.import_module(f'EIVData.{long_dataname}').load_data
     x_noise_strength =\
         importlib.import_module(f'EIVData.{long_dataname}').x_noise_strength
+    y_noise_strength =\
+        importlib.import_module(f'EIVData.{long_dataname}').y_noise_strength
+    func = importlib.import_module(f'EIVData.{long_dataname}').func
 
     # switch to gpu, if possible
     try:
@@ -69,10 +87,13 @@ def compute_predictions_and_uncertainties(data, x_range, eiv, number_of_draws,
         device = torch.device('cpu')
 
 
-    _, test_data = load_data(seed=plotting_seed)
+    _, test_data = load_data(seed=plotting_seed, return_ground_truth=False)
     input_dim = test_data[0][0].numel()
     output_dim = test_data[0][1].numel()
+    assert output_dim == 1
     test_x, test_y = test_data[:number_of_test_datapoints]
+    plotting_dictionary['test_data_points'] = (test_x.detach().cpu().numpy(),
+            test_y.detach().cpu().numpy())
 
     ## Create iterators for get_coverage_distribution
     seed_list = range(conf_dict["seed_range"][0],
@@ -124,13 +145,20 @@ def compute_predictions_and_uncertainties(data, x_range, eiv, number_of_draws,
                         net=net, device=device)
                 yield net
     
-    noisy_x = x_range + x_noise_strength * torch.randn(x_range.shape,
+    x_range = x_range.view((-1, input_dim))
+    noisy_x_range = x_range + x_noise_strength * torch.randn(x_range.shape,
             generator=torch.Generator().manual_seed(x_noise_seed))
     # add feature dimension (if necessary) and move to device
-    noisy_x = noisy_x.view((-1,input_dim)).to(device)
+    noisy_x_range = noisy_x_range.to(device)
+    noisy_y_range = func(x_range) + y_noise_strength *\
+    torch.randn(x_range.shape,
+            generator=torch.Generator().manual_seed(y_noise_seed))
+    plotting_dictionary['range_points'] =\
+            (noisy_x_range.detach().cpu().numpy(),
+            noisy_y_range.detach().cpu().numpy())
     mean_collection, unc_collection = [], []
     for net in net_iterator(eiv=eiv):
-        mean, unc = net.predict_mean_and_unc(noisy_x,
+        mean, unc = net.predict_mean_and_unc(noisy_x_range,
                 number_of_draws=number_of_draws)[:2]
         if len(mean.shape) > 1:
                 # assume only batch and output dimension
@@ -149,8 +177,10 @@ def compute_predictions_and_uncertainties(data, x_range, eiv, number_of_draws,
     mean_collection = torch.stack(mean_collection, dim=0)
     unc_collection = torch.stack(unc_collection, dim=0)
     # average
-    averaged_mean = torch.mean(mean_collection, dim=0)
-    averaged_unc = torch.mean(unc_collection, dim=0)
-    return averaged_mean, averaged_unc, (test_x, test_y)
+    plotting_dictionary['prediction'] =\
+            torch.mean(mean_collection, dim=0).detach().cpu().numpy()
+    plotting_dictionary['uncertainty'] =\
+            torch.mean(unc_collection, dim=0).detach().cpu().numpy()
+    return plotting_dictionary
+
 
-