diff --git a/EIVPackage/EIVArchitectures/Networks.py b/EIVPackage/EIVArchitectures/Networks.py
index 3a1c35c28d6b46f042d4e955d38fc77716868066..2d8186563e6d669ba77603e77e507c411260910f 100644
--- a/EIVPackage/EIVArchitectures/Networks.py
+++ b/EIVPackage/EIVArchitectures/Networks.py
@@ -279,6 +279,61 @@ class FNNBer(nn.Module):
             sigma = torch.mean(sigma, dim=1)
         return pred, sigma
 
+    def predictive_logdensity(self, x, y, number_of_draws=100, remove_graph=True,
+            average_batch_dimension=True, scale_labels=None,
+            decouple_dimensions=False):
+        """
+        Computes the logarithm of the predictive density evaluated at `y`. If
+        `average_batch_dimension` is `True` these values will be averaged over
+        the batch dimension.
+        :param x: A torch.tensor, the input
+        :param y: A torch.tensor, labels on which to evaluate the density
+    :param number_of_draws: Number of Monte Carlo draws (stochastic forward passes) per input in x
+        :param remove_graph: If True (default) the output will 
+        be detached to save memory
+        :param average_batch_dimension: Boolean. If True (default) the values
+        will be averaged over the batch dimension. If False, the batch
+        dimension will be left untouched and all values will be returned.
+        """
+        out, sigmas = self.predict(x, number_of_draws=number_of_draws,
+                take_average_of_prediction=False, remove_graph=remove_graph)
+        # Add "repetition" dimension to y and sigmas (out already has one from predict)
+        y = y[:,None,...]
+        sigmas = sigmas[:,None,...]
+        if len(y.shape) <= 2:
+            # add an output axis if necessary
+            y = y[...,None]
+            sigmas = sigmas[...,None]
+        # squeeze last dimensions into one
+        y = y.view((*y.shape[:2], -1))
+        sigmas = sigmas.view((*sigmas.shape[:2], -1))
+        out = out.view((*out.shape[:2], -1))
+        # check if dimensions consistent
+        assert y.shape == sigmas.shape
+        assert y.shape[0] == out.shape[0]
+        assert y.shape[2] == out.shape[2]
+        if scale_labels is not None:
+            extended_scale_labels = scale_labels.flatten()[None,None,:]
+            out = out * extended_scale_labels
+            y = y * extended_scale_labels
+            sigmas = sigmas * extended_scale_labels
+        # exponential argument for density
+        if not decouple_dimensions:
+            exp_arg =  torch.sum(-1/(2*sigmas**2) * (y-out)**2-\
+                        1/2 * torch.log(2 * torch.pi * sigmas**2), dim=2)
+        else:
+            exp_arg =  -1/(2*sigmas**2) * (y-out)**2-\
+                            1/2 * torch.log(2 * torch.pi * sigmas**2)
+        # average over parameter values
+        predictive_log_density_values = \
+                torch.logsumexp(input=exp_arg, dim=1)\
+                    - torch.log(torch.tensor(number_of_draws)) 
+        if average_batch_dimension:
+            return torch.mean(predictive_log_density_values, dim=0)
+        else:
+            return predictive_log_density_values
+
+
 class SmallFNNBer(FNNBer):
     """
     A fully connected net Bernoulli dropout layers.
diff --git a/Experiments/evaluate_energy.py b/Experiments/evaluate_energy.py
index f37b094c31ab9588f451ec108381b2431e095392..548596afccefd1336fa6e034f33252ed3003a6cb 100644
--- a/Experiments/evaluate_energy.py
+++ b/Experiments/evaluate_energy.py
@@ -43,3 +43,16 @@ scaled_res = scaled_res.detach().cpu().numpy().flatten()
 rmse = np.sqrt(np.mean(scaled_res**2)) 
 print(f'RMSE {rmse:.3f}')
 
+
+# NLL
+x,y = next(iter(test_dataloader))
+training_state = net.training
+net.train()
+logdens = net.predictive_logdensity(x, y, number_of_draws=100,
+        decouple_dimensions=True,
+        scale_labels=train_data.dataset.std_labels.view((-1,))).mean()
+if training_state:
+    net.train()
+else:
+    net.eval()
+print(f'Dropout predictive {logdens:.3f}')
diff --git a/Experiments/evaluate_kin8nm.py b/Experiments/evaluate_kin8nm.py
new file mode 100644
index 0000000000000000000000000000000000000000..e11f441d81a1b394b1833935333d3e92a428d370
--- /dev/null
+++ b/Experiments/evaluate_kin8nm.py
@@ -0,0 +1,58 @@
+import os
+import numpy as np
+import torch
+import torch.backends.cudnn
+from torch.utils.data import DataLoader
+from torch.utils.tensorboard.writer import SummaryWriter
+
+from EIVArchitectures import Networks, initialize_weights
+from EIVData.kin8nm import load_data
+from EIVTrainingRoutines import train_and_store, loss_functions
+
+from train_noneiv_kin8nm import p, init_std_y_list, seed_list, unscaled_reg, hidden_layers
+
+
+train_data, test_data = load_data()
+test_dataloader = DataLoader(test_data, batch_size=int(np.max((len(test_data), 800))))
+
+seed = seed_list[0]
+init_std_y = init_std_y_list[0]
+saved_file = os.path.join('saved_networks',
+            f'noneiv_kin8nm'\
+                    f'init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
+                    f'_p_{p:.2f}_seed_{seed}.pkl')
+
+input_dim = train_data[0][0].numel()
+output_dim = train_data[0][1].numel()
+net = Networks.FNNBer(p=p, init_std_y=init_std_y,
+        h=[input_dim, *hidden_layers, output_dim])
+train_and_store.open_stored_training(saved_file=saved_file,
+        net=net)
+
+
+# RMSE
+x,y = next(iter(test_dataloader))
+out = net(x)[0]
+if len(y.shape) <=1:
+    y = y.view((-1,1))
+assert y.shape == out.shape
+res = y-out
+scale = train_data.dataset.std_labels
+scaled_res = res * scale.view((1,-1))
+scaled_res = scaled_res.detach().cpu().numpy().flatten()
+rmse = np.sqrt(np.mean(scaled_res**2)) 
+print(f'RMSE {rmse:.3f}')
+
+
+# NLL
+x,y = next(iter(test_dataloader))
+training_state = net.training
+net.train()
+logdens = net.predictive_logdensity(x, y, number_of_draws=100,
+        decouple_dimensions=True,
+        scale_labels=train_data.dataset.std_labels.view((-1,))).mean()
+if training_state:
+    net.train()
+else:
+    net.eval()
+print(f'Dropout predictive {logdens:.3f}')
diff --git a/Experiments/evaluate_naval.py b/Experiments/evaluate_naval.py
new file mode 100644
index 0000000000000000000000000000000000000000..55e22d3c4a6d780dece45a36dbd7fa19bf01ae71
--- /dev/null
+++ b/Experiments/evaluate_naval.py
@@ -0,0 +1,81 @@
+import os
+import numpy as np
+import torch
+import torch.backends.cudnn
+from torch.utils.data import DataLoader
+from torch.utils.tensorboard.writer import SummaryWriter
+
+from EIVArchitectures import Networks, initialize_weights
+from EIVData.naval_propulsion import load_data
+from EIVTrainingRoutines import train_and_store, loss_functions
+
+from train_noneiv_naval import p, init_std_y_list, seed_list, unscaled_reg, hidden_layers
+
+
+train_data, test_data = load_data()
+test_dataloader = DataLoader(test_data, batch_size=int(np.max((len(test_data), 800))))
+
+seed = seed_list[0]
+init_std_y = init_std_y_list[0]
+saved_file = os.path.join('saved_networks',
+            f'noneiv_naval'\
+                    f'init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
+                    f'_p_{p:.2f}_seed_{seed}.pkl')
+
+input_dim = train_data[0][0].numel()
+output_dim = train_data[0][1].numel()
+net = Networks.FNNBer(p=p, init_std_y=init_std_y,
+        h=[input_dim, *hidden_layers, output_dim])
+train_and_store.open_stored_training(saved_file=saved_file,
+        net=net)
+
+
+# # RMSE
+# x,y = next(iter(test_dataloader))
+# training_state = net.training
+# net.eval()
+# out = net(x)[0]
+# if len(y.shape) <=1:
+#     y = y.view((-1,1))
+# assert y.shape == out.shape
+# res = y-out
+# if training_state:
+#     net.train()
+# scale = train_data.dataset.std_labels
+# scaled_res = res * scale.view((1,-1))
+# scaled_res = scaled_res.detach().cpu().numpy().flatten()
+# rmse = np.sqrt(np.mean(scaled_res**2)) 
+# print(f'no Dropout RMSE {rmse:.3f}')
+
+# # RMSE with prediction
+# x,y = next(iter(test_dataloader))
+# training_state = net.training
+# net.train()
+# out = net.predict(x, number_of_draws=100)[0]
+# if len(y.shape) <=1:
+#     y = y.view((-1,1))
+# assert y.shape == out.shape
+# res = y-out
+# if training_state:
+#     net.train()
+# else:
+#     net.eval()
+# scale = train_data.dataset.std_labels
+# scaled_res = res * scale.view((1,-1))
+# scaled_res = scaled_res.detach().cpu().numpy().flatten()
+# rmse = np.sqrt(np.mean(scaled_res**2)) 
+# print(f'Dropout predictive RMSE {rmse:.3f}')
+
+
+# NLL
+x,y = next(iter(test_dataloader))
+training_state = net.training
+net.train()
+logdens = net.predictive_logdensity(x, y, number_of_draws=100,
+        decouple_dimensions=True,
+        scale_labels=train_data.dataset.std_labels.view((-1,))).mean()
+if training_state:
+    net.train()
+else:
+    net.eval()
+print(f'Dropout predictive {logdens:.3f}')
diff --git a/Experiments/evaluate_protein.py b/Experiments/evaluate_protein.py
new file mode 100644
index 0000000000000000000000000000000000000000..985eee2e9c81df3b385a7dad813cdb147eb3c81d
--- /dev/null
+++ b/Experiments/evaluate_protein.py
@@ -0,0 +1,58 @@
+import os
+import numpy as np
+import torch
+import torch.backends.cudnn
+from torch.utils.data import DataLoader
+from torch.utils.tensorboard.writer import SummaryWriter
+
+from EIVArchitectures import Networks, initialize_weights
+from EIVData.protein_structure import load_data
+from EIVTrainingRoutines import train_and_store, loss_functions
+
+from train_noneiv_protein import p, init_std_y_list, seed_list, unscaled_reg, hidden_layers
+
+
+train_data, test_data = load_data()
+test_dataloader = DataLoader(test_data, batch_size=int(np.max((len(test_data), 800))))
+
+seed = seed_list[0]
+init_std_y = init_std_y_list[0]
+saved_file = os.path.join('saved_networks',
+            f'noneiv_protein'\
+                    f'init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
+                    f'_p_{p:.2f}_seed_{seed}.pkl')
+
+input_dim = train_data[0][0].numel()
+output_dim = train_data[0][1].numel()
+net = Networks.FNNBer(p=p, init_std_y=init_std_y,
+        h=[input_dim, *hidden_layers, output_dim])
+train_and_store.open_stored_training(saved_file=saved_file,
+        net=net)
+
+
+# RMSE
+x,y = next(iter(test_dataloader))
+out = net(x)[0]
+if len(y.shape) <=1:
+    y = y.view((-1,1))
+assert y.shape == out.shape
+res = y-out
+scale = train_data.dataset.std_labels
+scaled_res = res * scale.view((1,-1))
+scaled_res = scaled_res.detach().cpu().numpy().flatten()
+rmse = np.sqrt(np.mean(scaled_res**2)) 
+print(f'RMSE {rmse:.3f}')
+
+
+# NLL
+x,y = next(iter(test_dataloader))
+training_state = net.training
+net.train()
+logdens = net.predictive_logdensity(x, y, number_of_draws=100,
+        decouple_dimensions=True,
+        scale_labels=train_data.dataset.std_labels.view((-1,))).mean()
+if training_state:
+    net.train()
+else:
+    net.eval()
+print(f'Dropout predictive {logdens:.3f}')
diff --git a/Experiments/evaluate_wine.py b/Experiments/evaluate_wine.py
new file mode 100644
index 0000000000000000000000000000000000000000..3031b8483b8ff380554eb1bfba649f5eb9206c7e
--- /dev/null
+++ b/Experiments/evaluate_wine.py
@@ -0,0 +1,58 @@
+import os
+import numpy as np
+import torch
+import torch.backends.cudnn
+from torch.utils.data import DataLoader
+from torch.utils.tensorboard.writer import SummaryWriter
+
+from EIVArchitectures import Networks, initialize_weights
+from EIVData.wine_quality import load_data
+from EIVTrainingRoutines import train_and_store, loss_functions
+
+from train_noneiv_wine import p, init_std_y_list, seed_list, unscaled_reg, hidden_layers
+
+
+train_data, test_data = load_data()
+test_dataloader = DataLoader(test_data, batch_size=int(np.max((len(test_data), 800))))
+
+seed = seed_list[0]
+init_std_y = init_std_y_list[0]
+saved_file = os.path.join('saved_networks',
+            f'noneiv_wine'\
+                    f'init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
+                    f'_p_{p:.2f}_seed_{seed}.pkl')
+
+input_dim = train_data[0][0].numel()
+output_dim = train_data[0][1].numel()
+net = Networks.FNNBer(p=p, init_std_y=init_std_y,
+        h=[input_dim, *hidden_layers, output_dim])
+train_and_store.open_stored_training(saved_file=saved_file,
+        net=net)
+
+
+# RMSE
+x,y = next(iter(test_dataloader))
+out = net(x)[0]
+if len(y.shape) <=1:
+    y = y.view((-1,1))
+assert y.shape == out.shape
+res = y-out
+scale = train_data.dataset.std_labels
+scaled_res = res * scale.view((1,-1))
+scaled_res = scaled_res.detach().cpu().numpy().flatten()
+rmse = np.sqrt(np.mean(scaled_res**2)) 
+print(f'RMSE {rmse:.3f}')
+
+
+# NLL
+x,y = next(iter(test_dataloader))
+training_state = net.training
+net.train()
+logdens = net.predictive_logdensity(x, y, number_of_draws=100,
+        decouple_dimensions=True,
+        scale_labels=train_data.dataset.std_labels.view((-1,))).mean()
+if training_state:
+    net.train()
+else:
+    net.eval()
+print(f'Dropout predictive {logdens:.3f}')
diff --git a/Experiments/evaluate_yacht.py b/Experiments/evaluate_yacht.py
new file mode 100644
index 0000000000000000000000000000000000000000..6db62c48594069597f9e009ab3e9f0903263531b
--- /dev/null
+++ b/Experiments/evaluate_yacht.py
@@ -0,0 +1,58 @@
+import os
+import numpy as np
+import torch
+import torch.backends.cudnn
+from torch.utils.data import DataLoader
+from torch.utils.tensorboard.writer import SummaryWriter
+
+from EIVArchitectures import Networks, initialize_weights
+from EIVData.yacht_hydrodynamics import load_data
+from EIVTrainingRoutines import train_and_store, loss_functions
+
+from train_noneiv_yacht import p, init_std_y_list, seed_list, unscaled_reg, hidden_layers
+
+
+train_data, test_data = load_data()
+test_dataloader = DataLoader(test_data, batch_size=int(np.max((len(test_data), 800))))
+
+seed = seed_list[0]
+init_std_y = init_std_y_list[0]
+saved_file = os.path.join('saved_networks',
+            f'noneiv_yacht'\
+                    f'init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
+                    f'_p_{p:.2f}_seed_{seed}.pkl')
+
+input_dim = train_data[0][0].numel()
+output_dim = train_data[0][1].numel()
+net = Networks.FNNBer(p=p, init_std_y=init_std_y,
+        h=[input_dim, *hidden_layers, output_dim])
+train_and_store.open_stored_training(saved_file=saved_file,
+        net=net)
+
+
+# RMSE
+x,y = next(iter(test_dataloader))
+out = net(x)[0]
+if len(y.shape) <=1:
+    y = y.view((-1,1))
+assert y.shape == out.shape
+res = y-out
+scale = train_data.dataset.std_labels
+scaled_res = res * scale.view((1,-1))
+scaled_res = scaled_res.detach().cpu().numpy().flatten()
+rmse = np.sqrt(np.mean(scaled_res**2)) 
+print(f'RMSE {rmse:.3f}')
+
+
+# NLL
+x,y = next(iter(test_dataloader))
+training_state = net.training
+net.train()
+logdens = net.predictive_logdensity(x, y, number_of_draws=100,
+        decouple_dimensions=True,
+        scale_labels=train_data.dataset.std_labels.view((-1,))).mean()
+if training_state:
+    net.train()
+else:
+    net.eval()
+print(f'Dropout predictive {logdens:.3f}')
diff --git a/Experiments/train_noneiv_kin8nm.py b/Experiments/train_noneiv_kin8nm.py
new file mode 100644
index 0000000000000000000000000000000000000000..0baa2572ec56d985a5538b42c151d8989c645c95
--- /dev/null
+++ b/Experiments/train_noneiv_kin8nm.py
@@ -0,0 +1,147 @@
+"""
+Train non-EiV model on the kin8nm dataset using different seeds
+"""
+import random
+import os
+
+import numpy as np
+import torch
+import torch.backends.cudnn
+from torch.utils.data import DataLoader
+from torch.utils.tensorboard.writer import SummaryWriter
+
+from EIVArchitectures import Networks, initialize_weights
+from EIVData.kin8nm import load_data
+from EIVTrainingRoutines import train_and_store, loss_functions
+
+# hyperparameters
+lr = 1e-3
+batch_size = 32
+test_batch_size = 600
+number_of_epochs = 30
+unscaled_reg = 10
+report_point = 5
+p = 0.2
+lr_update = 20
+# pretraining = 300
+epoch_offset = 19
+init_std_y_list = [0.5]
+gamma = 0.5
+hidden_layers = [1024, 1024, 1024, 1024]
+device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
+
+# reproducibility
+def set_seeds(seed):
+    torch.backends.cudnn.benchmark = False
+    np.random.seed(seed)
+    random.seed(seed) 
+    torch.manual_seed(seed)
+seed_list = [0,]
+
+# to store the RMSE
+rmse_chain = []
+
+class UpdatedTrainEpoch(train_and_store.TrainEpoch):
+    def pre_epoch_update(self, net, epoch):
+        """
+        Overwrites the corresponding method
+        """
+        if epoch == 0:
+            self.lr = self.initial_lr
+            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
+            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
+            self.optimizer, lr_update, gamma)
+
+
+    def post_epoch_update(self, net, epoch):
+        """
+        Overwrites the corresponding method
+        """
+        if epoch >= epoch_offset:
+            net.std_y_par.requires_grad = True
+        self.lr_scheduler.step() 
+
+    def extra_report(self, net, i):
+        """
+        Overwrites the corresponding method
+        and is called after initialization of this class
+        """
+        rmse = self.rmse(net).item()
+        rmse_chain.append(rmse)
+        writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
+        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
+        print(f'RMSE {rmse:.3f}')
+
+    def rmse(self, net):
+        """
+        Compute the root mean squared error for `net`
+        """
+        net_train_state = net.training
+        net.eval()
+        x, y = next(iter(self.test_dataloader))
+        if len(y.shape) <= 1:
+            y = y.view((-1,1))
+        out = net(x.to(device))[0].detach().cpu()
+        assert out.shape == y.shape
+        if net_train_state:
+            net.train()
+        return torch.sqrt(torch.mean((out-y)**2))
+
+def train_on_data(init_std_y, seed):
+    """
+    Sets `seed`, loads data and trains a Bernoulli model, starting with
+    `init_std_y`.
+    """
+    # set seed
+    set_seeds(seed)
+    # load Datasets
+    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
+            normalize=True)
+    # make dataloaders
+    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
+            shuffle=True)
+    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
+            shuffle=True)
+    # create a net
+    input_dim = train_data[0][0].numel()
+    output_dim = train_data[0][1].numel()
+    net = Networks.FNNBer(p=p,
+            init_std_y=init_std_y,
+            h=[input_dim, *hidden_layers, output_dim])
+    net.apply(initialize_weights.glorot_init)
+    net = net.to(device)
+    net.std_y_par.requires_grad = False
+    std_x_map = lambda: 0.0
+    std_y_map = lambda: net.get_std_y().detach().cpu().item()
+    # regularization
+    reg = unscaled_reg/len(train_data)
+    # create epoch_map
+    criterion = loss_functions.nll_reg_loss
+    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
+            test_dataloader=test_dataloader,
+            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
+            lr=lr, reg=reg, report_point=report_point, device=device)
+    # run and save
+    save_file = os.path.join('saved_networks',
+            f'noneiv_kin8nm'\
+                    f'init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
+                    f'_p_{p:.2f}_seed_{seed}.pkl')
+    train_and_store.train_and_store(net=net, 
+            epoch_map=epoch_map,
+            number_of_epochs=number_of_epochs,
+            save_file=save_file)
+    
+
+if __name__ == '__main__':
+    for seed in seed_list:
+        # Tensorboard monitoring
+        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
+                f'run_noneiv_kin8nm_lr_{lr:.4f}_seed'\
+                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}')
+        print(f'>>>>SEED: {seed}')
+        for init_std_y in init_std_y_list:
+            print(f'Using init_std_y={init_std_y:.3f}')
+            train_on_data(init_std_y, seed)
+
+
diff --git a/Experiments/train_noneiv_msd.py b/Experiments/train_noneiv_msd.py
new file mode 100644
index 0000000000000000000000000000000000000000..a38b5215ffc0d6827290abe44160e50f1c0b4bc8
--- /dev/null
+++ b/Experiments/train_noneiv_msd.py
@@ -0,0 +1,145 @@
+"""
+Train non-EiV model on the million song dataset using different seeds
+"""
+import random
+import os
+
+import numpy as np
+import torch
+import torch.backends.cudnn
+from torch.utils.data import DataLoader
+from torch.utils.tensorboard.writer import SummaryWriter
+
+from EIVArchitectures import Networks, initialize_weights
+from EIVData.million_song import load_data
+from EIVTrainingRoutines import train_and_store, loss_functions
+
+# hyperparameters
+lr = 1e-3
+batch_size = 100
+test_batch_size = 600
+number_of_epochs = 10
+unscaled_reg = 10
+report_point = 5
+p = 0.2
+lr_update = 4
+# pretraining = 300
+epoch_offset = 4
+init_std_y_list = [0.5]
+gamma = 0.5
+hidden_layers = [1024, 1024, 1024, 1024]
+device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
+
+# reproducibility
+def set_seeds(seed):
+    torch.backends.cudnn.benchmark = False
+    np.random.seed(seed)
+    random.seed(seed) 
+    torch.manual_seed(seed)
+seed_list = [0,]
+
+# to store the RMSE
+rmse_chain = []
+
+class UpdatedTrainEpoch(train_and_store.TrainEpoch):
+    def pre_epoch_update(self, net, epoch):
+        """
+        Overwrites the corresponding method
+        """
+        if epoch == 0:
+            self.lr = self.initial_lr
+            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
+            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
+            self.optimizer, lr_update, gamma)
+
+
+    def post_epoch_update(self, net, epoch):
+        """
+        Overwrites the corresponding method
+        """
+        if epoch >= epoch_offset:
+            net.std_y_par.requires_grad = True
+        self.lr_scheduler.step() 
+
+    def extra_report(self, net, i):
+        """
+        Overwrites the corresponding method
+        and is called after initialization of this class
+        """
+        rmse = self.rmse(net).item()
+        rmse_chain.append(rmse)
+        writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
+        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
+        print(f'RMSE {rmse:.3f}')
+
+    def rmse(self, net):
+        """
+        Compute the root mean squared error for `net`
+        """
+        net_train_state = net.training
+        net.eval()
+        x, y = next(iter(self.test_dataloader))
+        if len(y.shape) <= 1:
+            y = y.view((-1,1))
+        out = net(x.to(device))[0].detach().cpu()
+        assert out.shape == y.shape
+        if net_train_state:
+            net.train()
+        return torch.sqrt(torch.mean((out-y)**2))
+
+def train_on_data(init_std_y, seed):
+    """
+    Sets `seed`, loads data and trains a Bernoulli model, starting with
+    `init_std_y`.
+    """
+    # set seed
+    set_seeds(seed)
+    # load Datasets
+    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
+            normalize=True)
+    # make dataloaders
+    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
+            shuffle=True)
+    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
+            shuffle=True)
+    # create a net
+    input_dim = train_data[0][0].numel()
+    output_dim = train_data[0][1].numel()
+    net = Networks.FNNBer(p=p,
+            init_std_y=init_std_y,
+            h=[input_dim, *hidden_layers, output_dim])
+    net.apply(initialize_weights.glorot_init)
+    net = net.to(device)
+    net.std_y_par.requires_grad = False
+    std_x_map = lambda: 0.0
+    std_y_map = lambda: net.get_std_y().detach().cpu().item()
+    # regularization
+    reg = unscaled_reg/len(train_data)
+    # create epoch_map
+    criterion = loss_functions.nll_reg_loss
+    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
+            test_dataloader=test_dataloader,
+            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
+            lr=lr, reg=reg, report_point=report_point, device=device)
+    # run and save
+    save_file = os.path.join('saved_networks',
+            f'noneiv_msd'\
+                    f'init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
+                    f'_p_{p:.2f}_seed_{seed}.pkl')
+    train_and_store.train_and_store(net=net, 
+            epoch_map=epoch_map,
+            number_of_epochs=number_of_epochs,
+            save_file=save_file)
+    
+
+if __name__ == '__main__':
+    for seed in seed_list:
+        # Tensorboard monitoring
+        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
+                f'run_noneiv_msd_lr_{lr:.4f}_seed'\
+                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}')
+        print(f'>>>>SEED: {seed}')
+        for init_std_y in init_std_y_list:
+            print(f'Using init_std_y={init_std_y:.3f}')
+            train_on_data(init_std_y, seed)
diff --git a/Experiments/train_noneiv_naval.py b/Experiments/train_noneiv_naval.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b03843f8c9f4277bf9f67b55329a58a8aa2ca3f
--- /dev/null
+++ b/Experiments/train_noneiv_naval.py
@@ -0,0 +1,145 @@
+"""
+Train non-EiV model on the naval propulsion dataset using different seeds
+"""
+import random
+import os
+
+import numpy as np
+import torch
+import torch.backends.cudnn
+from torch.utils.data import DataLoader
+from torch.utils.tensorboard.writer import SummaryWriter
+
+from EIVArchitectures import Networks, initialize_weights
+from EIVData.naval_propulsion import load_data
+from EIVTrainingRoutines import train_and_store, loss_functions
+
+# hyperparameters
+lr = 1e-3
+batch_size = 32
+test_batch_size = 600
+number_of_epochs = 30
+unscaled_reg = 10
+report_point = 5
+p = 0.2
+lr_update = 20
+# pretraining = 300
+epoch_offset = 20
+init_std_y_list = [0.5]
+gamma = 0.5
+hidden_layers = [1024, 1024, 1024, 1024]
+device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
+
+# reproducibility
+def set_seeds(seed):
+    torch.backends.cudnn.benchmark = False
+    np.random.seed(seed)
+    random.seed(seed) 
+    torch.manual_seed(seed)
+seed_list = [0,]
+
+# to store the RMSE
+rmse_chain = []
+
+class UpdatedTrainEpoch(train_and_store.TrainEpoch):
+    def pre_epoch_update(self, net, epoch):
+        """
+        Overwrites the corresponding method
+        """
+        if epoch == 0:
+            self.lr = self.initial_lr
+            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
+            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
+            self.optimizer, lr_update, gamma)
+
+
+    def post_epoch_update(self, net, epoch):
+        """
+        Overwrites the corresponding method
+        """
+        if epoch >= epoch_offset:
+            net.std_y_par.requires_grad = True
+        self.lr_scheduler.step() 
+
+    def extra_report(self, net, i):
+        """
+        Overwrites the corresponding method
+        and is called after initialization of this class
+        """
+        rmse = self.rmse(net).item()
+        rmse_chain.append(rmse)
+        writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
+        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
+        print(f'RMSE {rmse:.3f}')
+
+    def rmse(self, net):
+        """
+        Compute the root mean squared error for `net`
+        """
+        net_train_state = net.training
+        net.eval()
+        x, y = next(iter(self.test_dataloader))
+        if len(y.shape) <= 1:
+            y = y.view((-1,1))
+        out = net(x.to(device))[0].detach().cpu()
+        assert out.shape == y.shape
+        if net_train_state:
+            net.train()
+        return torch.sqrt(torch.mean((out-y)**2))
+
+def train_on_data(init_std_y, seed):
+    """
+    Sets `seed`, loads data and trains a Bernoulli model, starting with
+    `init_std_y`.
+    """
+    # set seed
+    set_seeds(seed)
+    # load Datasets
+    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
+            normalize=True)
+    # make dataloaders
+    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
+            shuffle=True)
+    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
+            shuffle=True)
+    # create a net
+    input_dim = train_data[0][0].numel()
+    output_dim = train_data[0][1].numel()
+    net = Networks.FNNBer(p=p,
+            init_std_y=init_std_y,
+            h=[input_dim, *hidden_layers, output_dim])
+    net.apply(initialize_weights.glorot_init)
+    net = net.to(device)
+    net.std_y_par.requires_grad = False
+    std_x_map = lambda: 0.0
+    std_y_map = lambda: net.get_std_y().detach().cpu().item()
+    # regularization
+    reg = unscaled_reg/len(train_data)
+    # create epoch_map
+    criterion = loss_functions.nll_reg_loss
+    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
+            test_dataloader=test_dataloader,
+            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
+            lr=lr, reg=reg, report_point=report_point, device=device)
+    # run and save
+    save_file = os.path.join('saved_networks',
+            f'noneiv_naval'\
+                    f'init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
+                    f'_p_{p:.2f}_seed_{seed}.pkl')
+    train_and_store.train_and_store(net=net, 
+            epoch_map=epoch_map,
+            number_of_epochs=number_of_epochs,
+            save_file=save_file)
+    
+
+if __name__ == '__main__':
+    for seed in seed_list:
+        # Tensorboard monitoring
+        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
+                f'run_noneiv_naval_lr_{lr:.4f}_seed'\
+                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}')
+        print(f'>>>>SEED: {seed}')
+        for init_std_y in init_std_y_list:
+            print(f'Using init_std_y={init_std_y:.3f}')
+            train_on_data(init_std_y, seed)
diff --git a/Experiments/train_noneiv_protein.py b/Experiments/train_noneiv_protein.py
new file mode 100644
index 0000000000000000000000000000000000000000..55db23c306d5e46f07926edd9ba53f2aa5b2d759
--- /dev/null
+++ b/Experiments/train_noneiv_protein.py
@@ -0,0 +1,147 @@
+"""
+Train non-EiV model on protein structure dataset using different seeds
+"""
+import random
+import os
+
+import numpy as np
+import torch
+import torch.backends.cudnn
+from torch.utils.data import DataLoader
+from torch.utils.tensorboard.writer import SummaryWriter
+
+from EIVArchitectures import Networks, initialize_weights
+from EIVData.protein_structure import load_data
+from EIVTrainingRoutines import train_and_store, loss_functions
+
+# hyperparameters
+lr = 1e-3
+batch_size = 100
+test_batch_size = 600
+number_of_epochs = 30
+unscaled_reg = 10  # rescaled by 1/len(train_data) in train_on_data
+report_point = 5
+p = 0.2  # passed to FNNBer; presumably dropout probability -- TODO confirm
+lr_update = 10  # StepLR step size (epochs)
+# pretraining = 300
+epoch_offset = 10  # epoch from which std_y_par becomes trainable
+init_std_y_list = [0.5]
+gamma = 0.5  # StepLR decay factor
+hidden_layers = [1024, 1024, 1024, 1024]
+device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')  # NOTE(review): hard-coded GPU index
+
+# reproducibility: seed all random number generators
+def set_seeds(seed):
+    torch.backends.cudnn.benchmark = False
+    np.random.seed(seed)
+    random.seed(seed) 
+    torch.manual_seed(seed)
+seed_list = [0,]
+
+# RMSE history, appended to by UpdatedTrainEpoch.extra_report
+rmse_chain = []
+
+class UpdatedTrainEpoch(train_and_store.TrainEpoch):
+    def pre_epoch_update(self, net, epoch):
+        """
+        At epoch 0: create the Adam optimizer and its StepLR scheduler.
+        """
+        if epoch == 0:
+            self.lr = self.initial_lr
+            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
+            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
+            self.optimizer, lr_update, gamma)
+
+
+    def post_epoch_update(self, net, epoch):
+        """
+        Unfreeze std_y_par after `epoch_offset` epochs; step the scheduler.
+        """
+        if epoch >= epoch_offset:
+            net.std_y_par.requires_grad = True
+        self.lr_scheduler.step() 
+
+    def extra_report(self, net, i):
+        """
+        Log RMSE and train/test loss to the module-level tensorboard
+        `writer` and append the RMSE to the module-level `rmse_chain`.
+        """
+        rmse = self.rmse(net).item()
+        rmse_chain.append(rmse)
+        writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
+        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
+        print(f'RMSE {rmse:.3f}')
+
+    def rmse(self, net):
+        """
+        Root mean squared error of `net` on one test batch (in eval mode).
+        """
+        net_train_state = net.training
+        net.eval()
+        x, y = next(iter(self.test_dataloader))
+        if len(y.shape) <= 1:
+            y = y.view((-1,1))
+        out = net(x.to(device))[0].detach().cpu()
+        assert out.shape == y.shape
+        if net_train_state:
+            net.train()
+        return torch.sqrt(torch.mean((out-y)**2))
+
+def train_on_data(init_std_y, seed):
+    """
+    Set `seed`, load the dataset and train a Bernoulli model, starting
+    with `init_std_y`; the trained network is stored under `save_file`.
+    """
+    # set seed
+    set_seeds(seed)
+    # load Datasets
+    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
+            normalize=True)
+    # make dataloaders
+    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
+            shuffle=True)
+    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
+            shuffle=True)
+    # create a net
+    input_dim = train_data[0][0].numel()
+    output_dim = train_data[0][1].numel()
+    net = Networks.FNNBer(p=p,
+            init_std_y=init_std_y,
+            h=[input_dim, *hidden_layers, output_dim])
+    net.apply(initialize_weights.glorot_init)
+    net = net.to(device)
+    net.std_y_par.requires_grad = False
+    std_x_map = lambda: 0.0
+    std_y_map = lambda: net.get_std_y().detach().cpu().item()
+    # regularization
+    reg = unscaled_reg/len(train_data)
+    # create epoch_map
+    criterion = loss_functions.nll_reg_loss
+    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
+            test_dataloader=test_dataloader,
+            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
+            lr=lr, reg=reg, report_point=report_point, device=device)
+    # run and save (added missing '_' after the dataset name)
+    save_file = os.path.join('saved_networks',
+            f'noneiv_protein_'\
+                    f'init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
+                    f'_p_{p:.2f}_seed_{seed}.pkl')
+    train_and_store.train_and_store(net=net, 
+            epoch_map=epoch_map,
+            number_of_epochs=number_of_epochs,
+            save_file=save_file)
+    
+
+if __name__ == '__main__':
+    for seed in seed_list:
+        # one tensorboard writer per seed, read by extra_report via the
+        # module-level name `writer`; NOTE(review): hard-coded user log path
+        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
+                f'run_noneiv_protein_lr_{lr:.4f}_seed'\
+                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}')
+        print(f'>>>>SEED: {seed}')
+        for init_std_y in init_std_y_list:
+            print(f'Using init_std_y={init_std_y:.3f}')
+            train_on_data(init_std_y, seed)
+
+
diff --git a/Experiments/train_noneiv_wine.py b/Experiments/train_noneiv_wine.py
new file mode 100644
index 0000000000000000000000000000000000000000..837eb720461dbc35267f1cd46a05e86699e86f09
--- /dev/null
+++ b/Experiments/train_noneiv_wine.py
@@ -0,0 +1,147 @@
+"""
+Train non-EiV model on wine quality dataset using different seeds
+"""
+import random
+import os
+
+import numpy as np
+import torch
+import torch.backends.cudnn
+from torch.utils.data import DataLoader
+from torch.utils.tensorboard.writer import SummaryWriter
+
+from EIVArchitectures import Networks, initialize_weights
+from EIVData.wine_quality import load_data
+from EIVTrainingRoutines import train_and_store, loss_functions
+
+# hyperparameters
+lr = 1e-3
+batch_size = 32
+test_batch_size = 800
+number_of_epochs = 100
+unscaled_reg = 10  # rescaled by 1/len(train_data) in train_on_data
+report_point = 5
+p = 0.2  # passed to FNNBer; presumably dropout probability -- TODO confirm
+lr_update = 30  # StepLR step size (epochs)
+# pretraining = 300
+epoch_offset = 50  # epoch from which std_y_par becomes trainable
+init_std_y_list = [0.5]
+gamma = 0.5  # StepLR decay factor
+hidden_layers = [1024, 1024, 1024, 1024]
+device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')  # NOTE(review): hard-coded GPU index
+
+# reproducibility: seed all random number generators
+def set_seeds(seed):
+    torch.backends.cudnn.benchmark = False
+    np.random.seed(seed)
+    random.seed(seed) 
+    torch.manual_seed(seed)
+seed_list = [0,]
+
+# RMSE history, appended to by UpdatedTrainEpoch.extra_report
+rmse_chain = []
+
+class UpdatedTrainEpoch(train_and_store.TrainEpoch):
+    def pre_epoch_update(self, net, epoch):
+        """
+        At epoch 0: create the Adam optimizer and its StepLR scheduler.
+        """
+        if epoch == 0:
+            self.lr = self.initial_lr
+            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
+            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
+            self.optimizer, lr_update, gamma)
+
+
+    def post_epoch_update(self, net, epoch):
+        """
+        Unfreeze std_y_par after `epoch_offset` epochs; step the scheduler.
+        """
+        if epoch >= epoch_offset:
+            net.std_y_par.requires_grad = True
+        self.lr_scheduler.step() 
+
+    def extra_report(self, net, i):
+        """
+        Log RMSE and train/test loss to the module-level tensorboard
+        `writer` and append the RMSE to the module-level `rmse_chain`.
+        """
+        rmse = self.rmse(net).item()
+        rmse_chain.append(rmse)
+        writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
+        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
+        print(f'RMSE {rmse:.3f}')
+
+    def rmse(self, net):
+        """
+        Root mean squared error of `net` on one test batch (in eval mode).
+        """
+        net_train_state = net.training
+        net.eval()
+        x, y = next(iter(self.test_dataloader))
+        if len(y.shape) <= 1:
+            y = y.view((-1,1))
+        out = net(x.to(device))[0].detach().cpu()
+        assert out.shape == y.shape
+        if net_train_state:
+            net.train()
+        return torch.sqrt(torch.mean((out-y)**2))
+
+def train_on_data(init_std_y, seed):
+    """
+    Set `seed`, load the dataset and train a Bernoulli model, starting
+    with `init_std_y`; the trained network is stored under `save_file`.
+    """
+    # set seed
+    set_seeds(seed)
+    # load Datasets
+    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
+            normalize=True)
+    # make dataloaders
+    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
+            shuffle=True)
+    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
+            shuffle=True)
+    # create a net
+    input_dim = train_data[0][0].numel()
+    output_dim = train_data[0][1].numel()
+    net = Networks.FNNBer(p=p,
+            init_std_y=init_std_y,
+            h=[input_dim, *hidden_layers, output_dim])
+    net.apply(initialize_weights.glorot_init)
+    net = net.to(device)
+    net.std_y_par.requires_grad = False
+    std_x_map = lambda: 0.0
+    std_y_map = lambda: net.get_std_y().detach().cpu().item()
+    # regularization
+    reg = unscaled_reg/len(train_data)
+    # create epoch_map
+    criterion = loss_functions.nll_reg_loss
+    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
+            test_dataloader=test_dataloader,
+            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
+            lr=lr, reg=reg, report_point=report_point, device=device)
+    # run and save (added missing '_' after the dataset name)
+    save_file = os.path.join('saved_networks',
+            f'noneiv_wine_'\
+                    f'init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
+                    f'_p_{p:.2f}_seed_{seed}.pkl')
+    train_and_store.train_and_store(net=net, 
+            epoch_map=epoch_map,
+            number_of_epochs=number_of_epochs,
+            save_file=save_file)
+    
+
+if __name__ == '__main__':
+    for seed in seed_list:
+        # one tensorboard writer per seed, read by extra_report via the
+        # module-level name `writer`; NOTE(review): hard-coded user log path
+        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
+                f'run_noneiv_wine_lr_{lr:.4f}_seed'\
+                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}')
+        print(f'>>>>SEED: {seed}')
+        for init_std_y in init_std_y_list:
+            print(f'Using init_std_y={init_std_y:.3f}')
+            train_on_data(init_std_y, seed)
+
+
diff --git a/Experiments/train_noneiv_yacht.py b/Experiments/train_noneiv_yacht.py
new file mode 100644
index 0000000000000000000000000000000000000000..7640817821e90ec82854ac9b92db7ad4291ad744
--- /dev/null
+++ b/Experiments/train_noneiv_yacht.py
@@ -0,0 +1,145 @@
+"""
+Train non-EiV model on the yacht hydrodynamics dataset using different seeds
+"""
+import random
+import os
+
+import numpy as np
+import torch
+import torch.backends.cudnn
+from torch.utils.data import DataLoader
+from torch.utils.tensorboard.writer import SummaryWriter
+
+from EIVArchitectures import Networks, initialize_weights
+from EIVData.yacht_hydrodynamics import load_data
+from EIVTrainingRoutines import train_and_store, loss_functions
+
+# hyperparameters
+lr = 1e-3
+batch_size = 32
+test_batch_size = 600
+number_of_epochs = 1200
+unscaled_reg = 10  # rescaled by 1/len(train_data) in train_on_data
+report_point = 5
+p = 0.2  # passed to FNNBer; presumably dropout probability -- TODO confirm
+lr_update = 200  # StepLR step size (epochs)
+# pretraining = 300
+epoch_offset = 250  # epoch from which std_y_par becomes trainable
+init_std_y_list = [0.5]
+gamma = 0.5  # StepLR decay factor
+hidden_layers = [1024, 1024, 1024, 1024]
+device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')  # NOTE(review): hard-coded GPU index
+
+# reproducibility: seed all random number generators
+def set_seeds(seed):
+    torch.backends.cudnn.benchmark = False
+    np.random.seed(seed)
+    random.seed(seed) 
+    torch.manual_seed(seed)
+seed_list = [0,]
+
+# RMSE history, appended to by UpdatedTrainEpoch.extra_report
+rmse_chain = []
+
+class UpdatedTrainEpoch(train_and_store.TrainEpoch):
+    def pre_epoch_update(self, net, epoch):
+        """
+        At epoch 0: create the Adam optimizer and its StepLR scheduler.
+        """
+        if epoch == 0:
+            self.lr = self.initial_lr
+            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
+            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
+            self.optimizer, lr_update, gamma)
+
+
+    def post_epoch_update(self, net, epoch):
+        """
+        Unfreeze std_y_par after `epoch_offset` epochs; step the scheduler.
+        """
+        if epoch >= epoch_offset:
+            net.std_y_par.requires_grad = True
+        self.lr_scheduler.step() 
+
+    def extra_report(self, net, i):
+        """
+        Log RMSE and train/test loss to the module-level tensorboard
+        `writer` and append the RMSE to the module-level `rmse_chain`.
+        """
+        rmse = self.rmse(net).item()
+        rmse_chain.append(rmse)
+        writer.add_scalar('RMSE', rmse, self.total_count)
+        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
+        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
+        print(f'RMSE {rmse:.3f}')
+
+    def rmse(self, net):
+        """
+        Root mean squared error of `net` on one test batch (in eval mode).
+        """
+        net_train_state = net.training
+        net.eval()
+        x, y = next(iter(self.test_dataloader))
+        if len(y.shape) <= 1:
+            y = y.view((-1,1))
+        out = net(x.to(device))[0].detach().cpu()
+        assert out.shape == y.shape
+        if net_train_state:
+            net.train()
+        return torch.sqrt(torch.mean((out-y)**2))
+
+def train_on_data(init_std_y, seed):
+    """
+    Set `seed`, load the dataset and train a Bernoulli model, starting
+    with `init_std_y`; the trained network is stored under `save_file`.
+    """
+    # set seed
+    set_seeds(seed)
+    # load Datasets
+    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
+            normalize=True)
+    # make dataloaders
+    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
+            shuffle=True)
+    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
+            shuffle=True)
+    # create a net
+    input_dim = train_data[0][0].numel()
+    output_dim = train_data[0][1].numel()
+    net = Networks.FNNBer(p=p,
+            init_std_y=init_std_y,
+            h=[input_dim, *hidden_layers, output_dim])
+    net.apply(initialize_weights.glorot_init)
+    net = net.to(device)
+    net.std_y_par.requires_grad = False
+    std_x_map = lambda: 0.0
+    std_y_map = lambda: net.get_std_y().detach().cpu().item()
+    # regularization
+    reg = unscaled_reg/len(train_data)
+    # create epoch_map
+    criterion = loss_functions.nll_reg_loss
+    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
+            test_dataloader=test_dataloader,
+            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
+            lr=lr, reg=reg, report_point=report_point, device=device)
+    # run and save (added missing '_' after the dataset name)
+    save_file = os.path.join('saved_networks',
+            f'noneiv_yacht_'\
+                    f'init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
+                    f'_p_{p:.2f}_seed_{seed}.pkl')
+    train_and_store.train_and_store(net=net, 
+            epoch_map=epoch_map,
+            number_of_epochs=number_of_epochs,
+            save_file=save_file)
+    
+
+if __name__ == '__main__':
+    for seed in seed_list:
+        # one tensorboard writer per seed, read by extra_report via the
+        # module-level name `writer`; NOTE(review): hard-coded user log path
+        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
+                f'run_noneiv_yacht_lr_{lr:.4f}_seed'\
+                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}')
+        print(f'>>>>SEED: {seed}')
+        for init_std_y in init_std_y_list:
+            print(f'Using init_std_y={init_std_y:.3f}')
+            train_on_data(init_std_y, seed)