diff --git a/EIVPackage/EIVData/concrete_strength.py b/EIVPackage/EIVData/concrete_strength.py
index 90bc1ff0b247786231888528d6cb702ad923f072..d6d13c4446dc8f5fd3b52690d7382d9dc8ba3b25 100644
--- a/EIVPackage/EIVData/concrete_strength.py
+++ b/EIVPackage/EIVData/concrete_strength.py
@@ -12,7 +12,7 @@ def load_data(seed=0, splitting_part=0.8, normalize=True):
     :normalize: Whether to normalize the data, defaults to True.
     :returns: concrete_trainset, concrete_testset
     """
-    concrete_dataset = CSVData('~/SharedData/AI/datasets/concrete_compression_strength/compresive_strength_concrete.csv',
+    concrete_dataset = CSVData('~/SharedData/AI/datasets/concrete_compression_strength/compressive_strength_concrete.csv',
             class_name='Concrete compressive strength(MPa, megapascals) ',
             shuffle_seed=seed,
             normalize=normalize)
diff --git a/Experiments/evaluate_tabular.py b/Experiments/evaluate_tabular.py
index 54b207893dd8ebefd9e83baa7fe9d3eeef003f62..d74c553789953779c12fee3a70f02de4b9d08328 100644
--- a/Experiments/evaluate_tabular.py
+++ b/Experiments/evaluate_tabular.py
@@ -26,7 +26,7 @@ input_dim = train_data[0][0].numel()
 output_dim = train_data[0][1].numel()
 
 def collect_metrics(x,y, seed=0,
-    noneiv_number_of_draws=100, eiv_number_of_draws=[100,1],
+    noneiv_number_of_draws=100, eiv_number_of_draws=[100,5],
     decouple_dimensions=False, device=torch.device('cuda:1')):
     """
     :param x: A torch.tensor, taken as input
@@ -40,7 +40,8 @@ def collect_metrics(x,y, seed=0,
     of Gal et al. is followed where, in the evaluation of the
     log-posterior-predictive, each dimension is treated independently and then
     averaged. If False (default), a multivariate distribution is used.
-    :returns: noneiv_rmse, noneiv_logdens,eiv_rmse, eiv_logdens
+    :returns: noneiv_rmse, noneiv_logdens, noneiv_bias,
+        eiv_rmse, eiv_logdens, eiv_bias
     """
     x,y = x.to(device), y.to(device)
     init_std_y = train_noneiv.init_std_y_list[0]
@@ -70,6 +71,7 @@ def collect_metrics(x,y, seed=0,
     scaled_res = res * scale.view((1,-1))
     scaled_res = scaled_res.detach().cpu().numpy().flatten()
     noneiv_rmse = np.sqrt(np.mean(scaled_res**2))
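+    # bias: mean of the signed (rescaled) residuals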
+    noneiv_bias = np.mean(scaled_res)
 
 
     # NLL
@@ -92,7 +94,7 @@ def collect_metrics(x,y, seed=0,
     hidden_layers = train_eiv.hidden_layers
     fixed_std_x = train_eiv.fixed_std_x
     saved_file = os.path.join('saved_networks',
-            f'eiv_energy'\
+            f'eiv_{short_dataname}'\
                     f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
                     f'_p_{p:.2f}_fixed_std_x_{fixed_std_x:.3f}'\
                     f'_seed_{seed}.pkl')
@@ -116,6 +118,7 @@ def collect_metrics(x,y, seed=0,
     scaled_res = res * scale.view((1,-1))
     scaled_res = scaled_res.detach().cpu().numpy().flatten()
     eiv_rmse = np.sqrt(np.mean(scaled_res**2))
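+    # bias: mean of the signed (rescaled) residuals for the EiV model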
+    eiv_bias = np.mean(scaled_res)
     if training_state:
         net.train()
     else:
@@ -139,28 +142,37 @@ def collect_metrics(x,y, seed=0,
         net.train()
     else:
         net.eval()
-    return noneiv_rmse, noneiv_logdens, eiv_rmse, eiv_logdens
+    return noneiv_rmse, noneiv_logdens, noneiv_bias, \
+            eiv_rmse, eiv_logdens, eiv_bias
 
 noneiv_rmse_collection = []
 noneiv_logdens_collection = []
+noneiv_bias_collection = []
 eiv_rmse_collection = []
 eiv_logdens_collection = []
-num_test_epochs = 30
+eiv_bias_collection = []
+num_test_epochs = 10
 assert train_noneiv.seed_list == train_eiv.seed_list
 seed_list = train_noneiv.seed_list
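+# evaluate at most (max_batch_number + 1) batches per test epoch to limit runtime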
+max_batch_number = 2
 for seed in tqdm(seed_list):
     train_data, test_data = load_data(seed=seed)
     test_dataloader = DataLoader(test_data,
             batch_size=int(np.max((len(test_data),
         800))), shuffle=True)
     for i in tqdm(range(num_test_epochs)):
-        for x,y in test_dataloader:
-            noneiv_rmse, noneiv_logdens, eiv_rmse, eiv_logdens =\
+        for j, (x,y) in enumerate(test_dataloader):
+            if j > max_batch_number:
+                break
+            noneiv_rmse, noneiv_logdens, noneiv_bias, \
+                    eiv_rmse, eiv_logdens, eiv_bias =\
                     collect_metrics(x,y, seed=seed)
             noneiv_rmse_collection.append(noneiv_rmse)
             noneiv_logdens_collection.append(noneiv_logdens)
+            noneiv_bias_collection.append(noneiv_bias)
             eiv_rmse_collection.append(eiv_rmse)
             eiv_logdens_collection.append(eiv_logdens)
+            eiv_bias_collection.append(eiv_bias)
 
 
 print('Non-EiV')
@@ -168,8 +180,12 @@ print(f'RMSE {np.mean(noneiv_rmse_collection):.5f}'\
         f'({np.std(noneiv_rmse_collection)/np.sqrt(num_test_epochs*len(seed_list)):.5f})')
 print(f'LogDens {np.mean(noneiv_logdens_collection):.5f}'\
         f'({np.std(noneiv_logdens_collection)/np.sqrt(num_test_epochs*len(seed_list)):.5f})')
+print(f'Bias {np.mean(noneiv_bias_collection):.5f}'\
+        f'({np.std(noneiv_bias_collection)/np.sqrt(num_test_epochs*len(seed_list)):.5f})')
 print('EiV')
 print(f'RMSE {np.mean(eiv_rmse_collection):.5f}'\
         f'({np.std(eiv_rmse_collection)/np.sqrt(num_test_epochs*len(seed_list)):.5f})')
 print(f'LogDens {np.mean(eiv_logdens_collection):.5f}'\
         f'({np.std(eiv_logdens_collection)/np.sqrt(num_test_epochs*len(seed_list)):.5f})')
+print(f'Bias {np.mean(eiv_bias_collection):.5f}'\
+        f'({np.std(eiv_bias_collection)/np.sqrt(num_test_epochs*len(seed_list)):.5f})')
diff --git a/Experiments/train_eiv_concrete.py b/Experiments/train_eiv_concrete.py
new file mode 100644
index 0000000000000000000000000000000000000000..0cc42d5e05181c471b8281c2ff98eefc1a08aa41
--- /dev/null
+++ b/Experiments/train_eiv_concrete.py
@@ -0,0 +1,155 @@
+"""
+Train EiV model on concrete strength dataset using different seeds
+"""
+import random
+import os
+
+import numpy as np
+import torch
+import torch.backends.cudnn
+from torch.utils.data import DataLoader
+from torch.utils.tensorboard.writer import SummaryWriter
+
+from EIVArchitectures import Networks, initialize_weights
+from EIVData.concrete_strength import load_data
+from EIVTrainingRoutines import train_and_store, loss_functions
+
+# hyperparameters
+lr = 1e-3
+batch_size = 32
+test_batch_size = 800
+number_of_epochs = 100
+unscaled_reg = 10
+report_point = 5
+p = 0.2
+lr_update = 20
+# pretraining = 300
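+# epochs to wait before std_y is made trainable (see post_epoch_update below)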
+epoch_offset = 10
+init_std_y_list = [0.5]
+gamma = 0.5
+hidden_layers = [1024, 1024, 1024, 1024]
+device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
+fixed_std_x = 0.05
+
+# reproducibility
+def set_seeds(seed):
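+    # turn off cudnn benchmarking, whose autotuned algorithm choice is nondeterministic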
+    torch.backends.cudnn.benchmark = False
+    np.random.seed(seed)
+    random.seed(seed) 
+    torch.manual_seed(seed)
+seed_list = range(10)
+
+# to store the RMSE
+rmse_chain = []
+
+class UpdatedTrainEpoch(train_and_store.TrainEpoch):
+    def pre_epoch_update(self, net, epoch):
+        """
+        Overrides the corresponding method
+        """
+        if epoch == 0:
+            self.lr = self.initial_lr
+            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
+            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
+                    self.optimizer, lr_update, gamma)
+
+
+    def post_epoch_update(self, net, epoch):
+        """
+        Overrides the corresponding method
+        """
+        if epoch >= epoch_offset:
+            net.std_y_par.requires_grad = True
+        self.lr_scheduler.step() 
+
+    def extra_report(self, net, i):
+        """
+        Overrides the corresponding method. Uses the global `writer`,
+        which is created in the `__main__` loop below.
+        """
+        rmse = self.rmse(net).item()
+        rmse_chain.append(rmse)
+        writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
+        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
+        print(f'RMSE {rmse:.3f}')
+
+    def rmse(self, net):
+        """
+        Compute the root mean squared error of `net` on a single batch
+        of the test data, with noise and dropout turned off
+        """
+        net_train_state = net.training
+        net_noise_state = net.noise_is_on
+        net.eval()
+        net.noise_off()
+        x, y = next(iter(self.test_dataloader))
+        if len(y.shape) <= 1:
+            y = y.view((-1,1))
+        out = net(x.to(device))[0].detach().cpu()
+        assert out.shape == y.shape
+        if net_train_state:
+            net.train()
+        if net_noise_state:
+            net.noise_on()
+        return torch.sqrt(torch.mean((out-y)**2))
+
+def train_on_data(init_std_y, seed):
+    """
+    Sets `seed`, loads the data and trains a Bernoulli model, starting with
+    `init_std_y`.
+    """
+    # set seed
+    set_seeds(seed)
+    # load Datasets
+    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
+            normalize=True)
+    # make dataloaders
+    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
+            shuffle=True)
+    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
+            shuffle=True)
+    # create a net
+    input_dim = train_data[0][0].numel()
+    output_dim = train_data[0][1].numel()
+    net = Networks.FNNEIV(p=p,
+            init_std_y=init_std_y,
+            h=[input_dim, *hidden_layers, output_dim],
+            fixed_std_x=fixed_std_x)
+    net.apply(initialize_weights.glorot_init)
+    net = net.to(device)
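+    # std_y starts out frozen; post_epoch_update makes it trainable after epoch_offset epochs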
+    net.std_y_par.requires_grad = False
+    std_x_map = lambda: net.get_std_x().detach().cpu().item()
+    std_y_map = lambda: net.get_std_y().detach().cpu().item()
+    # regularization
+    reg = unscaled_reg/len(train_data)
+    # create epoch_map
+    criterion = loss_functions.nll_eiv
+    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
+            test_dataloader=test_dataloader,
+            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
+            lr=lr, reg=reg, report_point=report_point, device=device)
+    # run and save
+    save_file = os.path.join('saved_networks',
+            f'eiv_concrete'\
+                    f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
+                    f'_p_{p:.2f}_fixed_std_x_{fixed_std_x:.3f}'\
+                    f'_seed_{seed}.pkl')
+    train_and_store.train_and_store(net=net, 
+            epoch_map=epoch_map,
+            number_of_epochs=number_of_epochs,
+            save_file=save_file)
+    
+
+if __name__ == '__main__':
+    for seed in seed_list:
+        # Tensorboard monitoring
+        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
+                f'run_eiv_concrete_lr_{lr:.4f}_seed'\
+                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}'\
+                f'_fixed_std_x_{fixed_std_x:.3f}')
+        print(f'>>>>SEED: {seed}')
+        for init_std_y in init_std_y_list:
+            print(f'Using init_std_y={init_std_y:.3f}')
+            train_on_data(init_std_y, seed)
+
+
diff --git a/Experiments/train_eiv_kin8nm.py b/Experiments/train_eiv_kin8nm.py
new file mode 100644
index 0000000000000000000000000000000000000000..58d191c062f61eed6a4725bb7f364ed0303aeaaa
--- /dev/null
+++ b/Experiments/train_eiv_kin8nm.py
@@ -0,0 +1,155 @@
+"""
+Train EiV model on the kin8nm dataset using different seeds
+"""
+import random
+import os
+
+import numpy as np
+import torch
+import torch.backends.cudnn
+from torch.utils.data import DataLoader
+from torch.utils.tensorboard.writer import SummaryWriter
+
+from EIVArchitectures import Networks, initialize_weights
+from EIVData.kin8nm import load_data
+from EIVTrainingRoutines import train_and_store, loss_functions
+
+# hyperparameters
+lr = 1e-3
+batch_size = 32
+test_batch_size = 600
+number_of_epochs = 30
+unscaled_reg = 10
+report_point = 5
+p = 0.2
+lr_update = 20
+# pretraining = 300
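+# epochs to wait before std_y is made trainable (see post_epoch_update below)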
+epoch_offset = 19
+init_std_y_list = [0.5]
+gamma = 0.5
+hidden_layers = [1024, 1024, 1024, 1024]
+device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
+fixed_std_x = 0.05
+
+# reproducibility
+def set_seeds(seed):
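+    # turn off cudnn benchmarking, whose autotuned algorithm choice is nondeterministic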
+    torch.backends.cudnn.benchmark = False
+    np.random.seed(seed)
+    random.seed(seed) 
+    torch.manual_seed(seed)
+seed_list = range(10)
+
+# to store the RMSE
+rmse_chain = []
+
+class UpdatedTrainEpoch(train_and_store.TrainEpoch):
+    def pre_epoch_update(self, net, epoch):
+        """
+        Overrides the corresponding method
+        """
+        if epoch == 0:
+            self.lr = self.initial_lr
+            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
+            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
+                    self.optimizer, lr_update, gamma)
+
+
+    def post_epoch_update(self, net, epoch):
+        """
+        Overrides the corresponding method
+        """
+        if epoch >= epoch_offset:
+            net.std_y_par.requires_grad = True
+        self.lr_scheduler.step() 
+
+    def extra_report(self, net, i):
+        """
+        Overrides the corresponding method. Uses the global `writer`,
+        which is created in the `__main__` loop below.
+        """
+        rmse = self.rmse(net).item()
+        rmse_chain.append(rmse)
+        writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
+        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
+        print(f'RMSE {rmse:.3f}')
+
+    def rmse(self, net):
+        """
+        Compute the root mean squared error of `net` on a single batch
+        of the test data, with noise and dropout turned off
+        """
+        net_train_state = net.training
+        net_noise_state = net.noise_is_on
+        net.eval()
+        net.noise_off()
+        x, y = next(iter(self.test_dataloader))
+        if len(y.shape) <= 1:
+            y = y.view((-1,1))
+        out = net(x.to(device))[0].detach().cpu()
+        assert out.shape == y.shape
+        if net_train_state:
+            net.train()
+        if net_noise_state:
+            net.noise_on()
+        return torch.sqrt(torch.mean((out-y)**2))
+
+def train_on_data(init_std_y, seed):
+    """
+    Sets `seed`, loads the data and trains a Bernoulli model, starting with
+    `init_std_y`.
+    """
+    # set seed
+    set_seeds(seed)
+    # load Datasets
+    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
+            normalize=True)
+    # make dataloaders
+    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
+            shuffle=True)
+    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
+            shuffle=True)
+    # create a net
+    input_dim = train_data[0][0].numel()
+    output_dim = train_data[0][1].numel()
+    net = Networks.FNNEIV(p=p,
+            init_std_y=init_std_y,
+            h=[input_dim, *hidden_layers, output_dim],
+            fixed_std_x=fixed_std_x)
+    net.apply(initialize_weights.glorot_init)
+    net = net.to(device)
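+    # std_y starts out frozen; post_epoch_update makes it trainable after epoch_offset epochs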
+    net.std_y_par.requires_grad = False
+    std_x_map = lambda: net.get_std_x().detach().cpu().item()
+    std_y_map = lambda: net.get_std_y().detach().cpu().item()
+    # regularization
+    reg = unscaled_reg/len(train_data)
+    # create epoch_map
+    criterion = loss_functions.nll_eiv
+    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
+            test_dataloader=test_dataloader,
+            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
+            lr=lr, reg=reg, report_point=report_point, device=device)
+    # run and save
+    save_file = os.path.join('saved_networks',
+            f'eiv_kin8nm'\
+                    f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
+                    f'_p_{p:.2f}_fixed_std_x_{fixed_std_x:.3f}'\
+                    f'_seed_{seed}.pkl')
+    train_and_store.train_and_store(net=net, 
+            epoch_map=epoch_map,
+            number_of_epochs=number_of_epochs,
+            save_file=save_file)
+    
+
+if __name__ == '__main__':
+    for seed in seed_list:
+        # Tensorboard monitoring
+        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
+                f'run_eiv_kin8nm_lr_{lr:.4f}_seed'\
+                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}'\
+                f'_fixed_std_x_{fixed_std_x:.3f}')
+        print(f'>>>>SEED: {seed}')
+        for init_std_y in init_std_y_list:
+            print(f'Using init_std_y={init_std_y:.3f}')
+            train_on_data(init_std_y, seed)
+
+
diff --git a/Experiments/train_eiv_naval.py b/Experiments/train_eiv_naval.py
new file mode 100644
index 0000000000000000000000000000000000000000..8db266c735d022fe4a2170a7de4d0a5ba3610d74
--- /dev/null
+++ b/Experiments/train_eiv_naval.py
@@ -0,0 +1,155 @@
+"""
+Train EiV model on the naval propulsion dataset using different seeds
+"""
+import random
+import os
+
+import numpy as np
+import torch
+import torch.backends.cudnn
+from torch.utils.data import DataLoader
+from torch.utils.tensorboard.writer import SummaryWriter
+
+from EIVArchitectures import Networks, initialize_weights
+from EIVData.naval_propulsion import load_data
+from EIVTrainingRoutines import train_and_store, loss_functions
+
+# hyperparameters
+lr = 1e-3
+batch_size = 32
+test_batch_size = 600
+number_of_epochs = 30
+unscaled_reg = 10
+report_point = 5
+p = 0.2
+lr_update = 20
+# pretraining = 300
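+# epochs to wait before std_y is made trainable (see post_epoch_update below)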
+epoch_offset = 20
+init_std_y_list = [0.5]
+gamma = 0.5
+hidden_layers = [1024, 1024, 1024, 1024]
+device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
+fixed_std_x = 0.05
+
+# reproducibility
+def set_seeds(seed):
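+    # turn off cudnn benchmarking, whose autotuned algorithm choice is nondeterministic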
+    torch.backends.cudnn.benchmark = False
+    np.random.seed(seed)
+    random.seed(seed) 
+    torch.manual_seed(seed)
+seed_list = range(10)
+
+# to store the RMSE
+rmse_chain = []
+
+class UpdatedTrainEpoch(train_and_store.TrainEpoch):
+    def pre_epoch_update(self, net, epoch):
+        """
+        Overrides the corresponding method
+        """
+        if epoch == 0:
+            self.lr = self.initial_lr
+            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
+            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
+                    self.optimizer, lr_update, gamma)
+
+
+    def post_epoch_update(self, net, epoch):
+        """
+        Overrides the corresponding method
+        """
+        if epoch >= epoch_offset:
+            net.std_y_par.requires_grad = True
+        self.lr_scheduler.step() 
+
+    def extra_report(self, net, i):
+        """
+        Overrides the corresponding method. Uses the global `writer`,
+        which is created in the `__main__` loop below.
+        """
+        rmse = self.rmse(net).item()
+        rmse_chain.append(rmse)
+        writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
+        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
+        print(f'RMSE {rmse:.3f}')
+
+    def rmse(self, net):
+        """
+        Compute the root mean squared error of `net` on a single batch
+        of the test data, with noise and dropout turned off
+        """
+        net_train_state = net.training
+        net_noise_state = net.noise_is_on
+        net.eval()
+        net.noise_off()
+        x, y = next(iter(self.test_dataloader))
+        if len(y.shape) <= 1:
+            y = y.view((-1,1))
+        out = net(x.to(device))[0].detach().cpu()
+        assert out.shape == y.shape
+        if net_train_state:
+            net.train()
+        if net_noise_state:
+            net.noise_on()
+        return torch.sqrt(torch.mean((out-y)**2))
+
+def train_on_data(init_std_y, seed):
+    """
+    Sets `seed`, loads the data and trains a Bernoulli model, starting with
+    `init_std_y`.
+    """
+    # set seed
+    set_seeds(seed)
+    # load Datasets
+    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
+            normalize=True)
+    # make dataloaders
+    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
+            shuffle=True)
+    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
+            shuffle=True)
+    # create a net
+    input_dim = train_data[0][0].numel()
+    output_dim = train_data[0][1].numel()
+    net = Networks.FNNEIV(p=p,
+            init_std_y=init_std_y,
+            h=[input_dim, *hidden_layers, output_dim],
+            fixed_std_x=fixed_std_x)
+    net.apply(initialize_weights.glorot_init)
+    net = net.to(device)
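+    # std_y starts out frozen; post_epoch_update makes it trainable after epoch_offset epochs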
+    net.std_y_par.requires_grad = False
+    std_x_map = lambda: net.get_std_x().detach().cpu().item()
+    std_y_map = lambda: net.get_std_y().detach().cpu().item()
+    # regularization
+    reg = unscaled_reg/len(train_data)
+    # create epoch_map
+    criterion = loss_functions.nll_eiv
+    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
+            test_dataloader=test_dataloader,
+            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
+            lr=lr, reg=reg, report_point=report_point, device=device)
+    # run and save
+    save_file = os.path.join('saved_networks',
+            f'eiv_naval'\
+                    f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
+                    f'_p_{p:.2f}_fixed_std_x_{fixed_std_x:.3f}'\
+                    f'_seed_{seed}.pkl')
+    train_and_store.train_and_store(net=net, 
+            epoch_map=epoch_map,
+            number_of_epochs=number_of_epochs,
+            save_file=save_file)
+    
+
+if __name__ == '__main__':
+    for seed in seed_list:
+        # Tensorboard monitoring
+        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
+                f'run_eiv_naval_lr_{lr:.4f}_seed'\
+                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}'\
+                f'_fixed_std_x_{fixed_std_x:.3f}')
+        print(f'>>>>SEED: {seed}')
+        for init_std_y in init_std_y_list:
+            print(f'Using init_std_y={init_std_y:.3f}')
+            train_on_data(init_std_y, seed)
+
+