diff --git a/EIVPackage/EIVArchitectures/Networks.py b/EIVPackage/EIVArchitectures/Networks.py
index a8f12008853729aaffdb24e13077ac96f0487420..56c76336dacc479e25a4915e941fc336f77282ee 100644
--- a/EIVPackage/EIVArchitectures/Networks.py
+++ b/EIVPackage/EIVArchitectures/Networks.py
@@ -476,6 +476,31 @@ class FNNBer(nn.Module):
         else:
             return predictive_log_density_values
 
+    def predict_mean_and_unc(self, x, number_of_draws=100, 
+            remove_graph=True):
+        """
+        Take the mean and standard deviation over `number_of_draws` forward
+        passes and return them together with the predicted sigmas.
+        **Note**: This method does not modify the state of the Dropout
+        layers; the corresponding setting is left to the user!
+        :param x: A torch.tensor, the input
+        :param number_of_draws: An integer or a list of two integers. A
+        single integer `number_of_draws` is converted internally to
+        `[number_of_draws, 1]`. The numbers of draws to obtain from x via
+        parameter sampling (first element) and noise input sampling
+        (second element).
+        :param remove_graph: If True (default), the output will be detached
+        from the computational graph to save memory.
+        :return: mean, std, sigmas
+        """
+        out, sigmas = self.predict(x=x,
+                number_of_draws=number_of_draws,
+                remove_graph=remove_graph,
+                take_average_of_prediction=False)
+        mean = torch.mean(out, dim=1)
+        std = torch.std(out, dim=1)
+        return mean, std, sigmas
+
 
 class SmallFNNBer(FNNBer):
     """
diff --git a/EIVPackage/EIVGeneral/coverage_metrices.py b/EIVPackage/EIVGeneral/coverage_metrices.py
index 71876483b07af9b3175f8e957130422e618dc49b..1c6ea70cd2e015d319aee481255a2c22075c4453 100644
--- a/EIVPackage/EIVGeneral/coverage_metrices.py
+++ b/EIVPackage/EIVGeneral/coverage_metrices.py
@@ -32,8 +32,31 @@ def multivariate_interval_length(dim, q=0.95):
     return scipy.stats.norm.ppf(univariate_quantile)
 
 
-def epistemic_coverage(mean_unc,  y, q=0.95, normalize_errors=False):
-    mean, epis_unc, aleat_unc = mean_unc
+def epistemic_coverage(prediction_triple, y, q=0.95, normalize_errors=False):
+    """
+    Returns the average coverage of `y` by the interval
+    "predictions + prefactor * q-interval",
+    where "q-interval" is the interval of measure `q` under the standard
+    normal, "predictions" is the first component of `prediction_triple` and
+    the prefactor is either the epistemic uncertainty, given by the second
+    component of `prediction_triple`, if `normalize_errors` is False, or 1
+    if it is True. The coverage is returned both as measured on `y`
+    (numerical coverage) and as a theoretical coverage computed from the
+    epistemic uncertainty (second component of `prediction_triple`) and the
+    aleatoric uncertainty (third component of `prediction_triple`).
+    :param prediction_triple: A triple of tensors containing (in this order): the
+    predictions of the neural net (the average under the posterior), the
+    epistemic uncertainty (the standard deviation under the posterior) and
+    the aleatoric uncertainty. All tensors are expected to have two dimensions:
+    a batch and a feature dimension.
+    :param y: A `torch.tensor` of the same shape as the first two components
+    of `prediction_triple`. If the feature dimension is missing, it is added.
+    :param q: A float between 0 and 1. Defaults to 0.95.
+    :param normalize_errors: If True, the deviations between predictions and
+    `y` are normalized by the total uncertainty, computed from the aleatoric
+    and epistemic uncertainty, and the coverage w.r.t. the q-interval is
+    computed.
+    :returns: numerical_coverage, theoretical_coverage
+    """
+    mean, epis_unc, aleat_unc = prediction_triple
     assert epis_unc.shape == aleat_unc.shape
     assert mean.shape == epis_unc.shape
     # Add feature dimension to y if missing
@@ -65,8 +88,19 @@ def epistemic_coverage(mean_unc,  y, q=0.95, normalize_errors=False):
         theoretical_coverage = q
     return numerical_coverage, theoretical_coverage
 
-def normalized_std(mean_unc, y):
-    mean, epis_unc, aleat_unc = mean_unc
+def normalized_std(prediction_triple, y):
+    """
+    Returns the standard deviation of normalized residuals. In theory this
+    number should be equal to 1.0.
+    :param prediction_triple: A triple of tensors containing (in this order): the
+    predictions of the neural net (the average under the posterior), the
+    epistemic uncertainty (the standard deviation under the posterior) and
+    the aleatoric uncertainty.
+    :param y: A `torch.tensor` of the same shape as the first two components
+    of `prediction_triple`. If the feature dimension is missing, it is added.
+    :returns: The standard deviation of the normalized residuals.
+    """
+    mean, epis_unc, aleat_unc = prediction_triple
     assert epis_unc.shape == aleat_unc.shape
     assert mean.shape == epis_unc.shape
     # Add feature dimension to y if missing
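
For reference, the docstring of `epistemic_coverage` condenses to the
following self-contained sketch of the `normalize_errors=False` case. This is
a univariate simplification, not the exact implementation in this module,
which handles the feature dimension via `multivariate_interval_length`:

    import torch
    import scipy.stats

    def coverage_sketch(prediction_triple, y, q=0.95):
        mean, epis_unc, aleat_unc = prediction_triple
        # half-width of the symmetric q-interval under the standard normal
        quantile = scipy.stats.norm.ppf(0.5 * (1 + q))
        # fraction of y covered by mean +- epis_unc * quantile
        inside = (y - mean).abs() <= epis_unc * quantile
        numerical_coverage = inside.float().mean().item()
        # coverage expected in theory, if the residuals have standard
        # deviation sqrt(epis_unc**2 + aleat_unc**2)
        total_unc = torch.sqrt(epis_unc**2 + aleat_unc**2)
        normal = torch.distributions.Normal(0.0, 1.0)
        theoretical_coverage = torch.mean(
                2 * normal.cdf(epis_unc * quantile / total_unc) - 1).item()
        return numerical_coverage, theoretical_coverage
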
diff --git a/Experiments/evaluate_tabular.py b/Experiments/evaluate_tabular.py
index d74c553789953779c12fee3a70f02de4b9d08328..cfa6664eed259441c865c36612d00063f9d0098f 100644
--- a/Experiments/evaluate_tabular.py
+++ b/Experiments/evaluate_tabular.py
@@ -1,6 +1,5 @@
 import importlib
 import os
-import matplotlib
 
 import numpy as np
 import torch
@@ -10,7 +9,7 @@ from tqdm import tqdm
 
 from EIVArchitectures import Networks
 from EIVTrainingRoutines import train_and_store
-
+from EIVGeneral.coverage_metrices import epistemic_coverage, normalized_std
 
 long_dataname = 'energy_efficiency'
 short_dataname = 'energy'
@@ -20,8 +19,6 @@ train_noneiv = importlib.import_module(f'train_noneiv_{short_dataname}')
 train_eiv = importlib.import_module(f'train_eiv_{short_dataname}')
 
 train_data, test_data = load_data()
-test_dataloader = DataLoader(test_data, batch_size=int(np.max((len(test_data),
-    64))), shuffle=True)
 input_dim = train_data[0][0].numel()
 output_dim = train_data[0][1].numel()
 
@@ -29,6 +26,8 @@ def collect_metrics(x,y, seed=0,
     noneiv_number_of_draws=100, eiv_number_of_draws=[100,5],
     decouple_dimensions=False, device=torch.device('cuda:1')):
     """
+    Compute various metrics for EiV and non-EiV networks and return them as
+    two dictionaries.
     :param x: A torch.tensor, taken as input
     :param y: A torch.tensor, taken as output
     :param seed: Integer. The seed used for loading, defaults to 0.
@@ -40,10 +39,13 @@ def collect_metrics(x,y, seed=0,
     of Gal et al. is followed where, in the evaluation of the
     log-posterior-predictive, each dimension is treated independently and then
     averaged. If False (default), a multivariate distribution is used.
-    :returns: noneiv_rmse, noneiv_logdens, noneiv_bias,
-    eiv_rmse, eiv_logdens, eiv_bias
+    :returns: Dictionaries noneiv_metrics, eiv_metrics
     """
     x,y = x.to(device), y.to(device)
+
+
+    # non-EiV
+    noneiv_metrics = {}
     init_std_y = train_noneiv.init_std_y_list[0]
     unscaled_reg = train_noneiv.unscaled_reg
     p = train_noneiv.p
@@ -61,23 +63,28 @@ def collect_metrics(x,y, seed=0,
     # RMSE
     training_state = net.training
     net.train()
-    out = net.predict(x, number_of_draws=noneiv_number_of_draws, 
-            take_average_of_prediction=True)[0]
+    prediction_triple =\
+            net.predict_mean_and_unc(x, number_of_draws=noneiv_number_of_draws)
     if len(y.shape) <= 1:
         y = y.view((-1,1))
-    assert y.shape == out.shape
-    res = y-out
+    assert y.shape == prediction_triple[0].shape
+    res = y-prediction_triple[0]
     scale = train_data.dataset.std_labels.to(device)
     scaled_res = res * scale.view((1,-1))
     scaled_res = scaled_res.detach().cpu().numpy().flatten()
-    noneiv_rmse = np.sqrt(np.mean(scaled_res**2))
-    noneiv_bias = np.mean(scaled_res)
+    noneiv_metrics['rmse'] = np.sqrt(np.mean(scaled_res**2))
+    noneiv_metrics['bias'] = np.mean(scaled_res)
+    noneiv_metrics['coverage_numerical'], noneiv_metrics['coverage_theory'] =\
+            epistemic_coverage(prediction_triple, y, normalize_errors=False)
+    noneiv_metrics['coverage_normalized'], _ =\
+            epistemic_coverage(prediction_triple, y, normalize_errors=True)
+    noneiv_metrics['res_std'] = normalized_std(prediction_triple, y)
+
 
 
     # NLL
-    training_state = net.training
-    net.train()
-    noneiv_logdens = net.predictive_logdensity(x, y, number_of_draws=100,
+    noneiv_metrics['logdens'] = net.predictive_logdensity(x, y,
+            number_of_draws=noneiv_number_of_draws,
             decouple_dimensions=decouple_dimensions,
             scale_labels=\
                    train_data.dataset.std_labels.view((-1,)).to(device)\
@@ -88,6 +95,7 @@ def collect_metrics(x,y, seed=0,
         net.eval()
 
     # EiV
+    eiv_metrics = {}
     init_std_y = train_eiv.init_std_y_list[0]
     unscaled_reg = train_eiv.unscaled_reg
     p = train_eiv.p
@@ -103,36 +111,32 @@ def collect_metrics(x,y, seed=0,
             fixed_std_x=fixed_std_x).to(device)
     train_and_store.open_stored_training(saved_file=saved_file,
             net=net)
+
     # RMSE
     training_state = net.training
     noise_state = net.noise_is_on
     net.train()
     net.noise_on()
-    out = net.predict(x, number_of_draws=eiv_number_of_draws,
-            take_average_of_prediction=True)[0]
-    if len(y.shape) <=1:
+    prediction_triple =\
+            net.predict_mean_and_unc(x, number_of_draws=eiv_number_of_draws)
+    if len(y.shape) <= 1:
         y = y.view((-1,1))
-    assert y.shape == out.shape
-    res = y-out
+    assert y.shape == prediction_triple[0].shape
+    res = y-prediction_triple[0]
     scale = train_data.dataset.std_labels.to(device)
     scaled_res = res * scale.view((1,-1))
     scaled_res = scaled_res.detach().cpu().numpy().flatten()
-    eiv_rmse = np.sqrt(np.mean(scaled_res**2))
-    eiv_bias = np.mean(scaled_res)
-    if training_state:
-        net.train()
-    else:
-        net.eval()
-    if noise_state:
-        net.noise_on()
-    else:
-        net.noise_off()
+    eiv_metrics['rmse'] = np.sqrt(np.mean(scaled_res**2))
+    eiv_metrics['bias'] = np.mean(scaled_res)
+    eiv_metrics['coverage_numerical'], eiv_metrics['coverage_theory'] =\
+            epistemic_coverage(prediction_triple, y, normalize_errors=False)
+    eiv_metrics['coverage_normalized'], _ =\
+            epistemic_coverage(prediction_triple, y, normalize_errors=True)
+    eiv_metrics['res_std'] = normalized_std(prediction_triple, y)
 
 
     # NLL
-    training_state = net.training
-    net.train()
-    eiv_logdens = net.predictive_logdensity(x, y,
+    eiv_metrics['logdens'] = net.predictive_logdensity(x, y,
             number_of_draws=eiv_number_of_draws,
             decouple_dimensions=decouple_dimensions,
             scale_labels=\
@@ -142,15 +146,19 @@ def collect_metrics(x,y, seed=0,
         net.train()
     else:
         net.eval()
-    return noneiv_rmse, noneiv_logdens, noneiv_bias, \
-            eiv_rmse, eiv_logdens, eiv_bias
-
-noneiv_rmse_collection = []
-noneiv_logdens_collection = []
-noneiv_bias_collection = []
-eiv_rmse_collection = []
-eiv_logdens_collection = []
-eiv_bias_collection = []
+    if noise_state:
+        net.noise_on()
+    else:
+        net.noise_off()
+    return noneiv_metrics, eiv_metrics
+
+
+collection_keys = ['rmse', 'logdens', 'bias', 'coverage_numerical',
+        'coverage_theory', 'coverage_normalized', 'res_std']
+noneiv_metrics_collection = {}
+eiv_metrics_collection = {}
+for key in collection_keys:
+    noneiv_metrics_collection[key] = []
+    eiv_metrics_collection[key] = []
 num_test_epochs = 10
 assert train_noneiv.seed_list == train_eiv.seed_list
 seed_list = train_noneiv.seed_list
@@ -158,34 +166,23 @@ max_batch_number = 2
 for seed in tqdm(seed_list):
     train_data, test_data = load_data(seed=seed)
     test_dataloader = DataLoader(test_data,
-            batch_size=int(np.max((len(test_data),
+            batch_size=int(np.min((len(test_data),
         800))), shuffle=True)
     for i in tqdm(range(num_test_epochs)):
         for j, (x,y) in enumerate(test_dataloader):
             if j > max_batch_number:
                 break
-            noneiv_rmse, noneiv_logdens, noneiv_bias, \
-                    eiv_rmse, eiv_logdens, eiv_bias =\
-                    collect_metrics(x,y, seed=seed)
-            noneiv_rmse_collection.append(noneiv_rmse)
-            noneiv_logdens_collection.append(noneiv_logdens)
-            noneiv_bias_collection.append(noneiv_bias)
-            eiv_rmse_collection.append(eiv_rmse)
-            eiv_logdens_collection.append(eiv_logdens)
-            eiv_bias_collection.append(eiv_bias)
-
-
-print('Non-EiV')
-print(f'RMSE {np.mean(noneiv_rmse_collection):.5f}'\
-        f'({np.std(noneiv_rmse_collection)/np.sqrt(num_test_epochs*len(seed_list)):.5f})')
-print(f'LogDens {np.mean(noneiv_logdens_collection):.5f}'\
-        f'({np.std(noneiv_logdens_collection)/np.sqrt(num_test_epochs*len(seed_list)):.5f})')
-print(f'Bias {np.mean(noneiv_bias_collection):.5f}'\
-        f'({np.std(noneiv_bias_collection)/np.sqrt(num_test_epochs*len(seed_list)):.5f})')
-print('EiV')
-print(f'RMSE {np.mean(eiv_rmse_collection):.5f}'\
-        f'({np.std(eiv_rmse_collection)/np.sqrt(num_test_epochs*len(seed_list)):.5f})')
-print(f'LogDens {np.mean(eiv_logdens_collection):.5f}'\
-        f'({np.std(eiv_logdens_collection)/np.sqrt(num_test_epochs*len(seed_list)):.5f})')
-print(f'Bias {np.mean(eiv_bias_collection):.5f}'\
-        f'({np.std(eiv_bias_collection)/np.sqrt(num_test_epochs*len(seed_list)):.5f})')
+
+            noneiv_metrics, eiv_metrics = collect_metrics(x,y, seed=seed)
+            for key in collection_keys:
+                noneiv_metrics_collection[key].append(noneiv_metrics[key])
+                eiv_metrics_collection[key].append(eiv_metrics[key])
+
+print('Non-EiV\n-----')
+for key in collection_keys:
+    print(f'{key} {np.mean(noneiv_metrics_collection[key]):.5f}'\
+            f'({np.std(noneiv_metrics_collection[key])/np.sqrt(num_test_epochs*len(seed_list)):.5f})')
+print('EiV\n-----')
+for key in collection_keys:
+    print(f'{key} {np.mean(eiv_metrics_collection[key]):.5f}'\
+            f'({np.std(eiv_metrics_collection[key])/np.sqrt(num_test_epochs*len(seed_list)):.5f})')
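
Each summary line prints the mean of a metric over all collected batches and,
in parentheses, its standard deviation divided by
sqrt(num_test_epochs * len(seed_list)) as an error estimate; for a single key
this amounts to the following restatement, using the variables defined above:

    import numpy as np

    values = np.array(noneiv_metrics_collection['rmse'])
    n_samples = num_test_epochs * len(seed_list)
    print(f'rmse {np.mean(values):.5f}'
            f'({np.std(values)/np.sqrt(n_samples):.5f})')
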
diff --git a/Experiments/train_eiv_california.py b/Experiments/train_eiv_california.py
index 39c95fbe0cad3b7f839a8c230e9c54e5bfcde8f3..f90a204c2134254078ea55b76e5009ceb19d3402 100644
--- a/Experiments/train_eiv_california.py
+++ b/Experiments/train_eiv_california.py
@@ -70,6 +70,8 @@ class UpdatedTrainEpoch(train_and_store.TrainEpoch):
         rmse = self.rmse(net).item()
         rmse_chain.append(rmse)
         writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('std_y', self.last_std_y, self.total_count)
+        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
         writer.add_scalar('train loss', self.last_train_loss, self.total_count)
         writer.add_scalar('test loss', self.last_test_loss, self.total_count)
         print(f'RMSE {rmse:.3f}')
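
The same two-line addition recurs in each training script below: it logs the
current noise level `std_y` and the ratio of RMSE to that level, so both can
be followed in TensorBoard. A self-contained sketch of the pattern, with
stand-in values in place of the trainer's state:

    from torch.utils.tensorboard import SummaryWriter

    writer = SummaryWriter('runs/demo')  # hypothetical log directory
    rmse, last_std_y, total_count = 0.42, 0.40, 100  # stand-in values
    writer.add_scalar('std_y', last_std_y, total_count)
    writer.add_scalar('RMSE:std_y', rmse / last_std_y, total_count)
    writer.close()
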
diff --git a/Experiments/train_eiv_concrete.py b/Experiments/train_eiv_concrete.py
index 0cc42d5e05181c471b8281c2ff98eefc1a08aa41..e637d359d9a7eed1dd87f40d3dfd8709040298cf 100644
--- a/Experiments/train_eiv_concrete.py
+++ b/Experiments/train_eiv_concrete.py
@@ -70,6 +70,8 @@ class UpdatedTrainEpoch(train_and_store.TrainEpoch):
         rmse = self.rmse(net).item()
         rmse_chain.append(rmse)
         writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('std_y', self.last_std_y, self.total_count)
+        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
         writer.add_scalar('train loss', self.last_train_loss, self.total_count)
         writer.add_scalar('test loss', self.last_test_loss, self.total_count)
         print(f'RMSE {rmse:.3f}')
diff --git a/Experiments/train_eiv_energy.py b/Experiments/train_eiv_energy.py
index 40be4b283ed12849418e6c760618f79a70122bda..304501b089043b8ea701f088e9859b655171dbd2 100644
--- a/Experiments/train_eiv_energy.py
+++ b/Experiments/train_eiv_energy.py
@@ -70,6 +70,8 @@ class UpdatedTrainEpoch(train_and_store.TrainEpoch):
         rmse = self.rmse(net).item()
         rmse_chain.append(rmse)
         writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('std_y', self.last_std_y, self.total_count)
+        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
         writer.add_scalar('train loss', self.last_train_loss, self.total_count)
         writer.add_scalar('test loss', self.last_test_loss, self.total_count)
         print(f'RMSE {rmse:.3f}')
diff --git a/Experiments/train_eiv_kin8nm.py b/Experiments/train_eiv_kin8nm.py
index 58d191c062f61eed6a4725bb7f364ed0303aeaaa..96b6c79c9316978e656ba901ff2f3fd6b0814026 100644
--- a/Experiments/train_eiv_kin8nm.py
+++ b/Experiments/train_eiv_kin8nm.py
@@ -70,6 +70,8 @@ class UpdatedTrainEpoch(train_and_store.TrainEpoch):
         rmse = self.rmse(net).item()
         rmse_chain.append(rmse)
         writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('std_y', self.last_std_y, self.total_count)
+        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
         writer.add_scalar('train loss', self.last_train_loss, self.total_count)
         writer.add_scalar('test loss', self.last_test_loss, self.total_count)
         print(f'RMSE {rmse:.3f}')
diff --git a/Experiments/train_eiv_msd.py b/Experiments/train_eiv_msd.py
index 16e617f0a64fb8f9567ac2846a22cefff109b901..4b533b691d5c052d2e28a43eac3c89caca1e70fa 100644
--- a/Experiments/train_eiv_msd.py
+++ b/Experiments/train_eiv_msd.py
@@ -70,6 +70,8 @@ class UpdatedTrainEpoch(train_and_store.TrainEpoch):
         rmse = self.rmse(net).item()
         rmse_chain.append(rmse)
         writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('std_y', self.last_std_y, self.total_count)
+        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
         writer.add_scalar('train loss', self.last_train_loss, self.total_count)
         writer.add_scalar('test loss', self.last_test_loss, self.total_count)
         print(f'RMSE {rmse:.3f}')
diff --git a/Experiments/train_eiv_naval.py b/Experiments/train_eiv_naval.py
index 8db266c735d022fe4a2170a7de4d0a5ba3610d74..f4a9aa75a4b8e00dce105764544b2b2a9d9ad192 100644
--- a/Experiments/train_eiv_naval.py
+++ b/Experiments/train_eiv_naval.py
@@ -70,6 +70,8 @@ class UpdatedTrainEpoch(train_and_store.TrainEpoch):
         rmse = self.rmse(net).item()
         rmse_chain.append(rmse)
         writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('std_y', self.last_std_y, self.total_count)
+        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
         writer.add_scalar('train loss', self.last_train_loss, self.total_count)
         writer.add_scalar('test loss', self.last_test_loss, self.total_count)
         print(f'RMSE {rmse:.3f}')
diff --git a/Experiments/train_eiv_power.py b/Experiments/train_eiv_power.py
index 2ddf75af75db7607a21cef6e212c9601d391b421..d9be09e4b7129599a41a784ed66e79f795fe67b8 100644
--- a/Experiments/train_eiv_power.py
+++ b/Experiments/train_eiv_power.py
@@ -70,6 +70,8 @@ class UpdatedTrainEpoch(train_and_store.TrainEpoch):
         rmse = self.rmse(net).item()
         rmse_chain.append(rmse)
         writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('std_y', self.last_std_y, self.total_count)
+        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
         writer.add_scalar('train loss', self.last_train_loss, self.total_count)
         writer.add_scalar('test loss', self.last_test_loss, self.total_count)
         print(f'RMSE {rmse:.3f}')
diff --git a/Experiments/train_eiv_protein.py b/Experiments/train_eiv_protein.py
index 3801d5e07572a7f37b23b457a3adc8b83f3f8418..625fb6a3620af1fd5aef227895ae39ebc7095c91 100644
--- a/Experiments/train_eiv_protein.py
+++ b/Experiments/train_eiv_protein.py
@@ -70,6 +70,8 @@ class UpdatedTrainEpoch(train_and_store.TrainEpoch):
         rmse = self.rmse(net).item()
         rmse_chain.append(rmse)
         writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('std_y', self.last_std_y, self.total_count)
+        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
         writer.add_scalar('train loss', self.last_train_loss, self.total_count)
         writer.add_scalar('test loss', self.last_test_loss, self.total_count)
         print(f'RMSE {rmse:.3f}')
diff --git a/Experiments/train_eiv_wine.py b/Experiments/train_eiv_wine.py
index ca561adc2e4d3f9a37060d90b370d21a06ed01d0..d6c9a41f9737005330c6aca56bf060a94a71ad56 100644
--- a/Experiments/train_eiv_wine.py
+++ b/Experiments/train_eiv_wine.py
@@ -70,6 +70,8 @@ class UpdatedTrainEpoch(train_and_store.TrainEpoch):
         rmse = self.rmse(net).item()
         rmse_chain.append(rmse)
         writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('std_y', self.last_std_y, self.total_count)
+        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
         writer.add_scalar('train loss', self.last_train_loss, self.total_count)
         writer.add_scalar('test loss', self.last_test_loss, self.total_count)
         print(f'RMSE {rmse:.3f}')
diff --git a/Experiments/train_eiv_yacht.py b/Experiments/train_eiv_yacht.py
index 2c9ee88853d77b99fefc3f2507ad827597895d61..10e2607ea151ff071fd1ca8ebee8e8ccc6885b6a 100644
--- a/Experiments/train_eiv_yacht.py
+++ b/Experiments/train_eiv_yacht.py
@@ -70,6 +70,8 @@ class UpdatedTrainEpoch(train_and_store.TrainEpoch):
         rmse = self.rmse(net).item()
         rmse_chain.append(rmse)
         writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('std_y', self.last_std_y, self.total_count)
+        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
         writer.add_scalar('train loss', self.last_train_loss, self.total_count)
         writer.add_scalar('test loss', self.last_test_loss, self.total_count)
         print(f'RMSE {rmse:.3f}')
diff --git a/Experiments/train_noneiv_california.py b/Experiments/train_noneiv_california.py
index 773a30ddf5d50d3a7d2fff2c58e6a9579ba24992..f359b4303aff0536ae52320c3e138478ac5da2ea 100644
--- a/Experiments/train_noneiv_california.py
+++ b/Experiments/train_noneiv_california.py
@@ -69,6 +69,8 @@ class UpdatedTrainEpoch(train_and_store.TrainEpoch):
         rmse = self.rmse(net).item()
         rmse_chain.append(rmse)
         writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('std_y', self.last_std_y, self.total_count)
+        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
         writer.add_scalar('train loss', self.last_train_loss, self.total_count)
         writer.add_scalar('test loss', self.last_test_loss, self.total_count)
         print(f'RMSE {rmse:.3f}')
diff --git a/Experiments/train_noneiv_concrete.py b/Experiments/train_noneiv_concrete.py
index d4a48a8dc6fc7c0273ea4f894d63d38acc931d7a..5e7c1e33ea750b5ddc661471969682ecab2008cd 100644
--- a/Experiments/train_noneiv_concrete.py
+++ b/Experiments/train_noneiv_concrete.py
@@ -69,6 +69,8 @@ class UpdatedTrainEpoch(train_and_store.TrainEpoch):
         rmse = self.rmse(net).item()
         rmse_chain.append(rmse)
         writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('std_y', self.last_std_y, self.total_count)
+        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
         writer.add_scalar('train loss', self.last_train_loss, self.total_count)
         writer.add_scalar('test loss', self.last_test_loss, self.total_count)
         print(f'RMSE {rmse:.3f}')
diff --git a/Experiments/train_noneiv_energy.py b/Experiments/train_noneiv_energy.py
index 106bc14e5802054f0bcfdbfdc3d7f4ebbd13d3ab..340b81ad389e443a2cc67a6e02388f463b735b0f 100644
--- a/Experiments/train_noneiv_energy.py
+++ b/Experiments/train_noneiv_energy.py
@@ -69,6 +69,8 @@ class UpdatedTrainEpoch(train_and_store.TrainEpoch):
         rmse = self.rmse(net).item()
         rmse_chain.append(rmse)
         writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('std_y', self.last_std_y, self.total_count)
+        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
         writer.add_scalar('train loss', self.last_train_loss, self.total_count)
         writer.add_scalar('test loss', self.last_test_loss, self.total_count)
         print(f'RMSE {rmse:.3f}')
diff --git a/Experiments/train_noneiv_kin8nm.py b/Experiments/train_noneiv_kin8nm.py
index 90af665059e0eca165cfb97fd991ba877fcfbad0..7070c381f25135038a4e6b7ca5058d7efbccfb7f 100644
--- a/Experiments/train_noneiv_kin8nm.py
+++ b/Experiments/train_noneiv_kin8nm.py
@@ -69,6 +69,8 @@ class UpdatedTrainEpoch(train_and_store.TrainEpoch):
         rmse = self.rmse(net).item()
         rmse_chain.append(rmse)
         writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('std_y', self.last_std_y, self.total_count)
+        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
         writer.add_scalar('train loss', self.last_train_loss, self.total_count)
         writer.add_scalar('test loss', self.last_test_loss, self.total_count)
         print(f'RMSE {rmse:.3f}')
diff --git a/Experiments/train_noneiv_msd.py b/Experiments/train_noneiv_msd.py
index f80c03e44ba37c0028806725e2beed1001ab0699..b72644d9e964d15e8e6fbf97a6b4f7e43226bce1 100644
--- a/Experiments/train_noneiv_msd.py
+++ b/Experiments/train_noneiv_msd.py
@@ -69,6 +69,8 @@ class UpdatedTrainEpoch(train_and_store.TrainEpoch):
         rmse = self.rmse(net).item()
         rmse_chain.append(rmse)
         writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('std_y', self.last_std_y, self.total_count)
+        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
         writer.add_scalar('train loss', self.last_train_loss, self.total_count)
         writer.add_scalar('test loss', self.last_test_loss, self.total_count)
         print(f'RMSE {rmse:.3f}')
diff --git a/Experiments/train_noneiv_naval.py b/Experiments/train_noneiv_naval.py
index f96c66e1c20425042726f3c22c27285ed8b085d1..fb8c57f6774981d5099f8c18f2b6e0dbba58af5e 100644
--- a/Experiments/train_noneiv_naval.py
+++ b/Experiments/train_noneiv_naval.py
@@ -69,6 +69,8 @@ class UpdatedTrainEpoch(train_and_store.TrainEpoch):
         rmse = self.rmse(net).item()
         rmse_chain.append(rmse)
         writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('std_y', self.last_std_y, self.total_count)
+        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
         writer.add_scalar('train loss', self.last_train_loss, self.total_count)
         writer.add_scalar('test loss', self.last_test_loss, self.total_count)
         print(f'RMSE {rmse:.3f}')
diff --git a/Experiments/train_noneiv_power.py b/Experiments/train_noneiv_power.py
index c63b5713cb6fd1a8dd1b9d0ea0d5e6a4bcec510f..3c87a2d74e27f09692d393366b0bacd8396c2881 100644
--- a/Experiments/train_noneiv_power.py
+++ b/Experiments/train_noneiv_power.py
@@ -69,6 +69,8 @@ class UpdatedTrainEpoch(train_and_store.TrainEpoch):
         rmse = self.rmse(net).item()
         rmse_chain.append(rmse)
         writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('std_y', self.last_std_y, self.total_count)
+        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
         writer.add_scalar('train loss', self.last_train_loss, self.total_count)
         writer.add_scalar('test loss', self.last_test_loss, self.total_count)
         print(f'RMSE {rmse:.3f}')
diff --git a/Experiments/train_noneiv_protein.py b/Experiments/train_noneiv_protein.py
index ca73d6af733bffd469da898d8bd43c958068586b..9e2d3f4095520182cc3b8f859c746c691f14722b 100644
--- a/Experiments/train_noneiv_protein.py
+++ b/Experiments/train_noneiv_protein.py
@@ -69,6 +69,8 @@ class UpdatedTrainEpoch(train_and_store.TrainEpoch):
         rmse = self.rmse(net).item()
         rmse_chain.append(rmse)
         writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('std_y', self.last_std_y, self.total_count)
+        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
         writer.add_scalar('train loss', self.last_train_loss, self.total_count)
         writer.add_scalar('test loss', self.last_test_loss, self.total_count)
         print(f'RMSE {rmse:.3f}')
diff --git a/Experiments/train_noneiv_wine.py b/Experiments/train_noneiv_wine.py
index dc239f74fda492662853d92859e355585f476db0..a445c1e87b3e72069614b4d4bb3f5f31a8477566 100644
--- a/Experiments/train_noneiv_wine.py
+++ b/Experiments/train_noneiv_wine.py
@@ -69,6 +69,8 @@ class UpdatedTrainEpoch(train_and_store.TrainEpoch):
         rmse = self.rmse(net).item()
         rmse_chain.append(rmse)
         writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('std_y', self.last_std_y, self.total_count)
+        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
         writer.add_scalar('train loss', self.last_train_loss, self.total_count)
         writer.add_scalar('test loss', self.last_test_loss, self.total_count)
         print(f'RMSE {rmse:.3f}')
diff --git a/Experiments/train_noneiv_yacht.py b/Experiments/train_noneiv_yacht.py
index d163e544f50bdc09283d138ba7149b5290f2a515..489a81c146a1e63a679c1235c590add5be8785ca 100644
--- a/Experiments/train_noneiv_yacht.py
+++ b/Experiments/train_noneiv_yacht.py
@@ -69,6 +69,8 @@ class UpdatedTrainEpoch(train_and_store.TrainEpoch):
         rmse = self.rmse(net).item()
         rmse_chain.append(rmse)
         writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('std_y', self.last_std_y, self.total_count)
+        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
         writer.add_scalar('train loss', self.last_train_loss, self.total_count)
         writer.add_scalar('test loss', self.last_test_loss, self.total_count)
         print(f'RMSE {rmse:.3f}')