diff --git a/EIVPackage/EIVArchitectures/Networks.py b/EIVPackage/EIVArchitectures/Networks.py
index 6acace59b7abef559bd631bdfaae915e6d481129..d9c0f1ba854549882ba22499232e8d33e1619334 100644
--- a/EIVPackage/EIVArchitectures/Networks.py
+++ b/EIVPackage/EIVArchitectures/Networks.py
@@ -27,20 +27,24 @@ class FNNEIV(nn.Module):
     :param repetition: Positive integer, the default value for repeating input,
     defaults to 1.  For a single call this can also be specified in the forward
     method.
+    :param std_y_requires_grad: Whether `sigma_y` should require gradients and
+    thus be updated during optimization. Defaults to False.
     **Note**: 
     - To change the deming factor afterwards, use the method `change_deming`
     - To change fixed_std_x afterwards, use the method `change_fixed_std_x`
+    - To change std_y afterwards, use the method `change_std_y`
     """
     LeakyReLUSlope = 1e-2
     def __init__(self, p = 0.2, init_std_y=1.0, precision_prior_zeta=0.0, 
             deming=1.0, h=[10, 1024,1024,1024,1024, 1], 
-            fixed_std_x = None, repetition = 1):
+            fixed_std_x = None, repetition = 1, std_y_requires_grad = False):
         super().__init__()
         # part before Bernoulli dropout
         self.init_std_y = init_std_y
-        InverseSoftplus = lambda sigma: torch.log(torch.exp(sigma) - 1 )
+        self.InverseSoftplus = lambda sigma: torch.log(torch.exp(sigma) - 1 )
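+        # std_y is stored via its inverse softplus, so that get_std_y(), i.e.
+        # Softplus(std_y_par), is always positive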
         self.std_y_par = nn.parameter.Parameter(
-                InverseSoftplus(torch.tensor([init_std_y])))
+                self.InverseSoftplus(torch.tensor([init_std_y])))
+        self.std_y_par.requires_grad = std_y_requires_grad
         self._repetition = repetition
         self.main = nn.Sequential(
                 EIVInput(precision_prior_zeta=precision_prior_zeta, 
@@ -87,6 +91,18 @@ class FNNEIV(nn.Module):
                 fixed_std_x = torch.tensor(fixed_std_x)
         self._fixed_std_x = fixed_std_x
 
+    def change_std_y(self, std_y):
+        """
+        Update the internal std_y to `std_y`.
+        :param std_y: A positive torch.tensor with a single element
+        """
+        assert std_y.numel() == 1
+        std_y = std_y.view((1,))
+        print('Updating std_y from %.3f to %.3f' % (self.get_std_y().item(),
+            std_y.item()))
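+        # write to .data so the update bypasses autograd and leaves the
+        # requires_grad setting of std_y_par untouched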
+        self.std_y_par.data = self.InverseSoftplus(std_y)
+
+
     def noise_off(self):
         self.noise_is_on = False
 
@@ -331,15 +347,19 @@ class FNNBer(nn.Module):
     :param p: dropout rate, defaults to 0.5
     :param init_std_y: Initial standard deviation for input y. 
     :param h: A list specifying the number of neurons in each layer.
+    :param std_y_requires_grad: Whether `sigma_y` should require gradients and
+    thus be updated during optimization. Defaults to False.
     """
     LeakyReLUSlope = 1e-2
-    def __init__(self, p=0.2, init_std_y=1.0, h=[10, 1024,1024,1024,1024, 1]):
+    def __init__(self, p=0.2, init_std_y=1.0, h=[10, 1024,1024,1024,1024, 1],
+            std_y_requires_grad=False):
         super().__init__()
         # part before Bernoulli dropout
         self.init_std_y = init_std_y
-        InverseSoftplus = lambda sigma: torch.log(torch.exp(sigma) - 1 )
+        self.InverseSoftplus = lambda sigma: torch.log(torch.exp(sigma) - 1 )
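+        # as in FNNEIV: store std_y via its inverse softplus, so that
+        # Softplus(std_y_par) is always positive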
         self.std_y_par = nn.parameter.Parameter(
-                InverseSoftplus(torch.tensor([init_std_y])))
+                self.InverseSoftplus(torch.tensor([init_std_y])))
+        self.std_y_par.requires_grad = std_y_requires_grad
         self.main = nn.Sequential(
                 nn.Linear(h[0], h[1]),
                 nn.LeakyReLU(self.LeakyReLUSlope),
@@ -363,6 +383,17 @@ class FNNBer(nn.Module):
     def get_std_y(self):
         return nn.Softplus()(self.std_y_par)
 
+    def change_std_y(self, std_y):
+        """
+        Update the internal std_y to `std_y`.
+        :param std_y: A positive torch.tensor with a single element
+        """
+        assert std_y.numel() == 1
+        std_y = std_y.view((1,))
+        print('Updating std_y from %.3f to %.3f' % (self.get_std_y().item(),
+            std_y.item()))
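+        # write to .data to bypass autograd and preserve requires_grad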
+        self.std_y_par.data = self.InverseSoftplus(std_y)
+
     def forward(self, x):
         mu = self.main(x)
         sigma = self.sigma(mu)
@@ -526,9 +557,13 @@ class SmallFNNBer(FNNBer):
     :param p: dropout rate, defaults to 0.5
     :param init_std_y: Initial standard deviation for input y. 
     :param h: A list specifying the number of neurons in each layer.
+    :param std_y_requires_grad: Whether `sigma_y` should require gradients and
+    thus be updated during optimization. Defaults to False.
     """
-    def __init__(self, p=0.2, init_std_y=1.0, h=[10, 1024,1024,1024, 1]):
-        super().__init__(p=p, init_std_y=init_std_y)
+    def __init__(self, p=0.2, init_std_y=1.0, h=[10, 1024,1024,1024, 1],
+            std_y_requires_grad=False):
+        super().__init__(p=p, init_std_y=init_std_y,
+                std_y_requires_grad=std_y_requires_grad)
         self.main = nn.Sequential(
                 nn.Linear(h[0], h[1]),
                 nn.LeakyReLU(self.LeakyReLUSlope),
diff --git a/Experiments/configurations/eiv_california.json b/Experiments/configurations/eiv_california.json
new file mode 100644
index 0000000000000000000000000000000000000000..29a71622d99299d2c7ccde4f148b560de52418ee
--- /dev/null
+++ b/Experiments/configurations/eiv_california.json
@@ -0,0 +1,21 @@
+{
+	"long_dataname": "california_housing",
+	"short_dataname": "california",
+	"lr": 1e-3,
+	"batch_size": 200,
+	"test_batch_size": 800,
+	"number_of_epochs": 100,
+	"unscaled_reg": 10,
+	"report_point": 5,
+	"p": 0.1,
+	"lr_update": 20,
+	"epoch_offset": 10,
+	"eiv_prediction_number_of_draws": 100,
+	"eiv_prediction_number_of_batches": 10,
+	"init_std_y_list": [0.5],
+	"gamma": 0.5,
+	"hidden_layers": [1024, 1024, 1024, 1024],
+	"fixed_std_x": 0.05,
+	"seed_range": [0,10],
+	"gpu_number": 1
+}
diff --git a/Experiments/configurations/eiv_concrete.json b/Experiments/configurations/eiv_concrete.json
new file mode 100644
index 0000000000000000000000000000000000000000..11121c144b429ad2f970aa815f2a4e7fed8a893a
--- /dev/null
+++ b/Experiments/configurations/eiv_concrete.json
@@ -0,0 +1,21 @@
+{
+	"long_dataname": "concrete_strength",
+	"short_dataname": "concrete",
+	"lr": 1e-3,
+	"batch_size": 32,
+	"test_batch_size": 800,
+	"number_of_epochs": 100,
+	"unscaled_reg": 10,
+	"report_point": 5,
+	"p": 0.2,
+	"lr_update": 20,
+	"epoch_offset": 10,
+	"eiv_prediction_number_of_draws": 100,
+	"eiv_prediction_number_of_batches": 10,
+	"init_std_y_list": [0.5],
+	"gamma": 0.5,
+	"hidden_layers": [1024, 1024, 1024, 1024],
+	"fixed_std_x": 0.05,
+	"seed_range": [0,10],
+	"gpu_number": 1
+}
diff --git a/Experiments/configurations/eiv_energy.json b/Experiments/configurations/eiv_energy.json
new file mode 100644
index 0000000000000000000000000000000000000000..bca6774dd7b4ce71ca78b487246d6a5e7f4b76f3
--- /dev/null
+++ b/Experiments/configurations/eiv_energy.json
@@ -0,0 +1,21 @@
+{
+	"long_dataname": "energy_efficiency",
+	"short_dataname": "energy",
+	"lr": 1e-3,
+	"batch_size": 32,
+	"test_batch_size": 600,
+	"number_of_epochs": 600,
+	"unscaled_reg": 10,
+	"report_point": 5,
+	"p": 0.2,
+	"lr_update": 100,
+	"epoch_offset": 100,
+	"eiv_prediction_number_of_draws": 100,
+	"eiv_prediction_number_of_batches": 10,
+	"init_std_y_list": [0.5],
+	"gamma": 0.5,
+	"hidden_layers": [1024, 1024, 1024, 1024],
+	"fixed_std_x": 0.05,
+	"seed_range": [0,10],
+	"gpu_number": 1
+}
diff --git a/Experiments/configurations/eiv_kin8nm.json b/Experiments/configurations/eiv_kin8nm.json
new file mode 100644
index 0000000000000000000000000000000000000000..fa3718a096e417c2ab456a8589009d0052ae6670
--- /dev/null
+++ b/Experiments/configurations/eiv_kin8nm.json
@@ -0,0 +1,21 @@
+{
+	"long_dataname": "kin8nm",
+	"short_dataname": "kin8nm",
+	"lr": 1e-3,
+	"batch_size": 32,
+	"test_batch_size": 600,
+	"number_of_epochs": 30,
+	"unscaled_reg": 10,
+	"report_point": 5,
+	"p": 0.2,
+	"lr_update": 20,
+	"epoch_offset": 19,
+	"eiv_prediction_number_of_draws": 100,
+	"eiv_prediction_number_of_batches": 10,
+	"init_std_y_list": [0.5],
+	"gamma": 0.5,
+	"hidden_layers": [1024, 1024, 1024, 1024],
+	"fixed_std_x": 0.05,
+	"seed_range": [0,10],
+	"gpu_number": 1
+}
diff --git a/Experiments/configurations/eiv_msd.json b/Experiments/configurations/eiv_msd.json
new file mode 100644
index 0000000000000000000000000000000000000000..1c2527630dac6172c9271ee230b5c34a2abfd71f
--- /dev/null
+++ b/Experiments/configurations/eiv_msd.json
@@ -0,0 +1,21 @@
+{
+	"long_dataname": "million_song",
+	"short_dataname": "msd",
+	"lr": 1e-3,
+	"batch_size": 100,
+	"test_batch_size": 600,
+	"number_of_epochs": 10,
+	"unscaled_reg": 10,
+	"report_point": 5,
+	"p": 0.2,
+	"lr_update": 4,
+	"epoch_offset": 4,
+	"eiv_prediction_number_of_draws": 100,
+	"eiv_prediction_number_of_batches": 10,
+	"init_std_y_list": [0.5],
+	"gamma": 0.5,
+	"hidden_layers": [1024, 1024, 1024, 1024],
+	"fixed_std_x": 0.05,
+	"seed_range": [0,10],
+	"gpu_number": 1
+}
diff --git a/Experiments/configurations/eiv_naval.json b/Experiments/configurations/eiv_naval.json
new file mode 100644
index 0000000000000000000000000000000000000000..3358831d627188d5c2b93ceaa748b77de893c23d
--- /dev/null
+++ b/Experiments/configurations/eiv_naval.json
@@ -0,0 +1,21 @@
+{
+        "long_dataname": "naval_propulsion",
+        "short_dataname": "naval",
+        "lr": 1e-3,
+        "batch_size": 32,
+        "test_batch_size": 600,
+        "number_of_epochs": 30,
+        "unscaled_reg": 10,
+        "report_point": 5,
+        "p": 0.2,
+        "lr_update": 20,
+        "epoch_offset": 20,
+	"eiv_prediction_number_of_draws": 100,
+	"eiv_prediction_number_of_batches": 10,
+        "init_std_y_list": [0.5],
+        "gamma": 0.5,
+        "hidden_layers": [1024, 1024, 1024, 1024],
+        "fixed_std_x": 0.05,
+        "seed_range": [0,10],
+        "gpu_number": 1
+}
diff --git a/Experiments/configurations/eiv_power.json b/Experiments/configurations/eiv_power.json
new file mode 100644
index 0000000000000000000000000000000000000000..842f6e2662bef07696de1eb5f041e677b586dcd6
--- /dev/null
+++ b/Experiments/configurations/eiv_power.json
@@ -0,0 +1,21 @@
+{
+	"long_dataname": "power_plant",
+	"short_dataname": "power",
+	"lr": 1e-3,
+	"batch_size": 64,
+	"test_batch_size": 600,
+	"number_of_epochs": 35,
+	"unscaled_reg": 10,
+	"report_point": 5,
+	"p": 0.2,
+	"lr_update": 10,
+	"epoch_offset": 15,
+	"eiv_prediction_number_of_draws": 100,
+	"eiv_prediction_number_of_batches": 10,
+	"init_std_y_list": [0.5],
+	"gamma": 0.5,
+	"hidden_layers": [1024, 1024, 1024, 1024],
+	"fixed_std_x": 0.05,
+	"seed_range": [0,10],
+	"gpu_number": 1
+}
diff --git a/Experiments/configurations/eiv_protein.json b/Experiments/configurations/eiv_protein.json
new file mode 100644
index 0000000000000000000000000000000000000000..97f5946d431d04bc83c71904846152160fa0c7d7
--- /dev/null
+++ b/Experiments/configurations/eiv_protein.json
@@ -0,0 +1,21 @@
+{
+	"long_dataname": "protein_structure",
+	"short_dataname": "protein",
+	"lr": 1e-3,
+	"batch_size": 100,
+	"test_batch_size": 600,
+	"number_of_epochs": 30,
+	"unscaled_reg": 10,
+	"report_point": 5,
+	"p": 0.2,
+	"lr_update": 10,
+	"epoch_offset": 10,
+	"eiv_prediction_number_of_draws": 100,
+	"eiv_prediction_number_of_batches": 10,
+	"init_std_y_list": [0.5],
+	"gamma": 0.5,
+	"hidden_layers": [1024, 1024, 1024, 1024],
+	"fixed_std_x": 0.05,
+	"seed_range": [0,10],
+	"gpu_number": 1
+}
diff --git a/Experiments/configurations/eiv_wine.json b/Experiments/configurations/eiv_wine.json
new file mode 100644
index 0000000000000000000000000000000000000000..0b40a61272719cee6afc839f171103c54a830af8
--- /dev/null
+++ b/Experiments/configurations/eiv_wine.json
@@ -0,0 +1,21 @@
+{
+	"long_dataname": "wine_quality",
+	"short_dataname": "wine",
+	"lr": 1e-3,
+	"batch_size": 32,
+	"test_batch_size": 800,
+	"number_of_epochs": 100,
+	"unscaled_reg": 10,
+	"report_point": 5,
+	"p": 0.2,
+	"lr_update": 30,
+	"epoch_offset": 50,
+	"eiv_prediction_number_of_draws": 100,
+	"eiv_prediction_number_of_batches": 10,
+	"init_std_y_list": [0.5],
+	"gamma": 0.5,
+	"hidden_layers": [1024, 1024, 1024, 1024],
+	"fixed_std_x": 0.05,
+	"seed_range": [0,10],
+	"gpu_number": 1
+}
diff --git a/Experiments/configurations/eiv_yacht.json b/Experiments/configurations/eiv_yacht.json
new file mode 100644
index 0000000000000000000000000000000000000000..f0526e7b8a09c4d669674b4cd9033322868b8a4d
--- /dev/null
+++ b/Experiments/configurations/eiv_yacht.json
@@ -0,0 +1,21 @@
+{
+	"long_dataname": "yacht_hydrodynamics",
+	"short_dataname": "yacht",
+	"lr": 1e-3,
+	"batch_size": 32,
+	"test_batch_size": 600,
+	"number_of_epochs": 1200,
+	"unscaled_reg": 10,
+	"report_point": 5,
+	"p": 0.2,
+	"lr_update": 200,
+	"epoch_offset": 250,
+	"eiv_prediction_number_of_draws": 100,
+	"eiv_prediction_number_of_batches": 10,
+	"init_std_y_list": [0.5],
+	"gamma": 0.5,
+	"hidden_layers": [1024, 1024, 1024, 1024],
+	"fixed_std_x": 0.05,
+	"seed_range": [0,10],
+	"gpu_number": 1
+}
diff --git a/Experiments/configurations/noneiv_california.json b/Experiments/configurations/noneiv_california.json
new file mode 100644
index 0000000000000000000000000000000000000000..5005b43d8272c22893903a8a6c7494edeed4e554
--- /dev/null
+++ b/Experiments/configurations/noneiv_california.json
@@ -0,0 +1,20 @@
+{
+	"long_dataname": "california_housing",
+	"short_dataname": "california",
+	"lr": 1e-3,
+	"batch_size": 200,
+	"test_batch_size": 800,
+	"number_of_epochs": 100,
+	"unscaled_reg": 10,
+	"report_point": 5,
+	"p": 0.1,
+	"lr_update": 20,
+	"epoch_offset": 0 ,
+	"noneiv_prediction_number_of_draws": 100,
+	"noneiv_prediction_number_of_batches": 10,
+	"init_std_y_list": [0.5],
+	"gamma": 0.5,
+	"hidden_layers": [1024, 1024, 1024, 1024],
+	"seed_range": [0,10],
+	"gpu_number": 1
+}
diff --git a/Experiments/configurations/noneiv_concrete.json b/Experiments/configurations/noneiv_concrete.json
new file mode 100644
index 0000000000000000000000000000000000000000..66552b7193675eda193479ff9909494fc2bc2133
--- /dev/null
+++ b/Experiments/configurations/noneiv_concrete.json
@@ -0,0 +1,20 @@
+{
+	"long_dataname": "concrete_strength",
+	"short_dataname": "concrete",
+	"lr": 1e-3,
+	"batch_size": 32,
+	"test_batch_size": 800,
+	"number_of_epochs": 100,
+	"unscaled_reg": 10,
+	"report_point": 5,
+	"p": 0.2,
+	"lr_update": 20,
+	"epoch_offset": 10,
+	"noneiv_prediction_number_of_draws": 100,
+	"noneiv_prediction_number_of_batches": 10,
+	"init_std_y_list": [0.5],
+	"gamma": 0.5,
+	"hidden_layers": [1024, 1024, 1024, 1024],
+	"seed_range": [0,10],
+	"gpu_number": 1
+}
diff --git a/Experiments/configurations/noneiv_energy.json b/Experiments/configurations/noneiv_energy.json
new file mode 100644
index 0000000000000000000000000000000000000000..74eb45c721793e94f0efbb4d67a3f4acaa05e8da
--- /dev/null
+++ b/Experiments/configurations/noneiv_energy.json
@@ -0,0 +1,20 @@
+{
+	"long_dataname": "energy_efficiency",
+	"short_dataname": "energy",
+	"lr": 1e-3,
+	"batch_size": 32,
+	"test_batch_size": 600,
+	"number_of_epochs": 600,
+	"unscaled_reg": 10,
+	"report_point": 5,
+	"p": 0.2,
+	"lr_update": 100,
+	"epoch_offset": 100,
+	"noneiv_prediction_number_of_draws": 100,
+	"noneiv_prediction_number_of_batches": 10,
+	"init_std_y_list": [0.5],
+	"gamma": 0.5,
+	"hidden_layers": [1024, 1024, 1024, 1024],
+	"seed_range": [0,10],
+	"gpu_number": 1
+}
diff --git a/Experiments/configurations/noneiv_kin8nm.json b/Experiments/configurations/noneiv_kin8nm.json
new file mode 100644
index 0000000000000000000000000000000000000000..22615f61b00cf2b06c6e5fbc0051f3c86e8f92cb
--- /dev/null
+++ b/Experiments/configurations/noneiv_kin8nm.json
@@ -0,0 +1,20 @@
+{
+	"long_dataname": "kin8nm",
+	"short_dataname": "kin8nm",
+	"lr": 1e-3,
+	"batch_size": 32,
+	"test_batch_size": 600,
+	"number_of_epochs": 30,
+	"unscaled_reg": 10,
+	"report_point": 5,
+	"p": 0.2,
+	"lr_update": 20,
+	"epoch_offset": 19,
+	"noneiv_prediction_number_of_draws": 100,
+	"noneiv_prediction_number_of_batches": 10,
+	"init_std_y_list": [0.5],
+	"gamma": 0.5,
+	"hidden_layers": [1024, 1024, 1024, 1024],
+	"seed_range": [0,10],
+	"gpu_number": 1
+}
diff --git a/Experiments/configurations/noneiv_msd.json b/Experiments/configurations/noneiv_msd.json
new file mode 100644
index 0000000000000000000000000000000000000000..1c4f338fe3152c6a21d66cff05376efcbf88f148
--- /dev/null
+++ b/Experiments/configurations/noneiv_msd.json
@@ -0,0 +1,20 @@
+{
+	"long_dataname": "million_song",
+	"short_dataname": "msd",
+	"lr": 1e-3,
+	"batch_size": 100,
+	"test_batch_size": 600,
+	"number_of_epochs": 10,
+	"unscaled_reg": 10,
+	"report_point": 5,
+	"p": 0.2,
+	"lr_update": 4,
+	"epoch_offset": 4,
+	"noneiv_prediction_number_of_draws": 100,
+	"noneiv_prediction_number_of_batches": 10,
+	"init_std_y_list": [0.5],
+	"gamma": 0.5,
+	"hidden_layers": [1024, 1024, 1024, 1024],
+	"seed_range": [0,10],
+	"gpu_number": 1
+}
diff --git a/Experiments/configurations/noneiv_naval.json b/Experiments/configurations/noneiv_naval.json
new file mode 100644
index 0000000000000000000000000000000000000000..5dea6c49692e193dc25818ae83c6f467860a1520
--- /dev/null
+++ b/Experiments/configurations/noneiv_naval.json
@@ -0,0 +1,20 @@
+{
+	"long_dataname": "naval_propulsion",
+	"short_dataname": "naval",
+	"lr": 1e-3,
+	"batch_size": 32,
+	"test_batch_size": 600,
+	"number_of_epochs": 30,
+	"unscaled_reg": 10,
+	"report_point": 5,
+	"p": 0.2,
+	"lr_update": 20,
+	"epoch_offset": 20,
+	"noneiv_prediction_number_of_draws": 100,
+	"noneiv_prediction_number_of_batches": 10,
+	"init_std_y_list": [0.5],
+	"gamma": 0.5,
+	"hidden_layers": [1024, 1024, 1024, 1024],
+	"seed_range": [0,10],
+	"gpu_number": 1
+}
diff --git a/Experiments/configurations/noneiv_power.json b/Experiments/configurations/noneiv_power.json
new file mode 100644
index 0000000000000000000000000000000000000000..6e524e6c4f684f9c0affc983f3d2d5a7e4597248
--- /dev/null
+++ b/Experiments/configurations/noneiv_power.json
@@ -0,0 +1,20 @@
+{
+	"long_dataname": "power_plant",
+	"short_dataname": "power",
+	"lr": 1e-3,
+	"batch_size": 64,
+	"test_batch_size": 600,
+	"number_of_epochs": 35,
+	"unscaled_reg": 10,
+	"report_point": 5,
+	"p": 0.2,
+	"lr_update": 10,
+	"epoch_offset": 15,
+	"noneiv_prediction_number_of_draws": 100,
+	"noneiv_prediction_number_of_batches": 10,
+	"init_std_y_list": [0.5],
+	"gamma": 0.5,
+	"hidden_layers": [1024, 1024, 1024, 1024],
+	"seed_range": [0,10],
+	"gpu_number": 1
+}
diff --git a/Experiments/configurations/noneiv_protein.json b/Experiments/configurations/noneiv_protein.json
new file mode 100644
index 0000000000000000000000000000000000000000..1edd04fa1d28b8634080beec1865d30b6f854153
--- /dev/null
+++ b/Experiments/configurations/noneiv_protein.json
@@ -0,0 +1,20 @@
+{
+	"long_dataname": "protein_structure",
+	"short_dataname": "protein",
+	"lr": 1e-3,
+	"batch_size": 100,
+	"test_batch_size": 600,
+	"number_of_epochs": 30,
+	"unscaled_reg": 10,
+	"report_point": 5,
+	"p": 0.2,
+	"lr_update": 10,
+	"epoch_offset": 10,
+	"noneiv_prediction_number_of_draws": 100,
+	"noneiv_prediction_number_of_batches": 10,
+	"init_std_y_list": [0.5],
+	"gamma": 0.5,
+	"hidden_layers": [1024, 1024, 1024, 1024],
+	"seed_range": [0,10],
+	"gpu_number": 1
+}
diff --git a/Experiments/configurations/noneiv_wine.json b/Experiments/configurations/noneiv_wine.json
new file mode 100644
index 0000000000000000000000000000000000000000..717fb33c371b6d3743da58c4489ed96bd3e62ea6
--- /dev/null
+++ b/Experiments/configurations/noneiv_wine.json
@@ -0,0 +1,20 @@
+{
+	"long_dataname": "wine_quality",
+	"short_dataname": "wine",
+	"lr": 1e-3,
+	"batch_size": 32,
+	"test_batch_size": 800,
+	"number_of_epochs": 100,
+	"unscaled_reg": 10,
+	"report_point": 5,
+	"p": 0.2,
+	"lr_update": 30,
+	"epoch_offset": 50,
+	"noneiv_prediction_number_of_draws": 100,
+	"noneiv_prediction_number_of_batches": 10,
+	"init_std_y_list": [0.5],
+	"gamma": 0.5,
+	"hidden_layers": [1024, 1024, 1024, 1024],
+	"seed_range": [0,10],
+	"gpu_number": 1
+}
diff --git a/Experiments/configurations/noneiv_yacht.json b/Experiments/configurations/noneiv_yacht.json
new file mode 100644
index 0000000000000000000000000000000000000000..9b425eb15a735131e47eb3ba0b1a990375122548
--- /dev/null
+++ b/Experiments/configurations/noneiv_yacht.json
@@ -0,0 +1,20 @@
+{
+	"long_dataname": "yacht_hydrodynamics",
+	"short_dataname": "yacht",
+	"lr": 1e-3,
+	"batch_size": 32,
+	"test_batch_size": 600,
+	"number_of_epochs": 1200,
+	"unscaled_reg": 10,
+	"report_point": 5,
+	"p": 0.2,
+	"lr_update": 200,
+	"epoch_offset": 250,
+	"noneiv_prediction_number_of_draws": 100,
+	"noneiv_prediction_number_of_batches": 10,
+	"init_std_y_list": [0.5],
+	"gamma": 0.5,
+	"hidden_layers": [1024, 1024, 1024, 1024],
+	"seed_range": [0,10],
+	"gpu_number": 1
+}
diff --git a/Experiments/train_eiv_california.py b/Experiments/train_eiv.py
similarity index 64%
rename from Experiments/train_eiv_california.py
rename to Experiments/train_eiv.py
index f90a204c2134254078ea55b76e5009ceb19d3402..5c9032bbc5af5515a06453548370b7fbb9c713da 100644
--- a/Experiments/train_eiv_california.py
+++ b/Experiments/train_eiv.py
@@ -1,8 +1,10 @@
 """
-Train EiV model on california housing dataset using different seeds
+Train EiV model using different seeds
 """
 import random
+import importlib
 import os
+import json
 
 import numpy as np
 import torch
@@ -11,33 +13,51 @@ from torch.utils.data import DataLoader
 from torch.utils.tensorboard.writer import SummaryWriter
 
 from EIVArchitectures import Networks, initialize_weights
-from EIVData.california_housing import load_data
 from EIVTrainingRoutines import train_and_store, loss_functions
 
-# hyperparameters
-lr = 1e-3
-batch_size = 200
-test_batch_size = 800
-number_of_epochs = 100
-unscaled_reg = 10
-report_point = 5
-p = 0.1
-lr_update = 20
-# pretraining = 300
-epoch_offset = 10
-init_std_y_list = [0.5]
-gamma = 0.5
-hidden_layers = [1024, 1024, 1024, 1024]
-device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
-fixed_std_x = 0.05
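+# short name of the dataset to train on; a matching JSON file
+# configurations/eiv_<data>.json is expected below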
+data = 'california'
+
+# load hyperparameters from JSON file
+with open(os.path.join('configurations',f'eiv_{data}.json'),'r') as conf_file:
+    conf_dict = json.load(conf_file)
+
+long_dataname = conf_dict["long_dataname"]
+short_dataname = conf_dict["short_dataname"]
+lr = conf_dict["lr"]
+batch_size = conf_dict["batch_size"]
+test_batch_size = conf_dict["test_batch_size"]
+number_of_epochs = conf_dict["number_of_epochs"]
+unscaled_reg = conf_dict["unscaled_reg"]
+report_point = conf_dict["report_point"]
+p = conf_dict["p"]
+lr_update = conf_dict["lr_update"]
+# number of epochs to wait before sigma_y is updated after each epoch
+epoch_offset = conf_dict["epoch_offset"]
+# used to estimate the train RMSE and update sigma_y accordingly
+eiv_prediction_number_of_draws = conf_dict["eiv_prediction_number_of_draws"]
+eiv_prediction_number_of_batches = conf_dict["eiv_prediction_number_of_batches"]
+init_std_y_list = conf_dict["init_std_y_list"]
+fixed_std_x = conf_dict['fixed_std_x']
+gamma = conf_dict["gamma"]
+hidden_layers = conf_dict["hidden_layers"]
+seed_range = conf_dict['seed_range']
+
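+# use the GPU specified in the configuration; fall back to the CPU if no
+# gpu_number is given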
+try:
+    gpu_number = conf_dict["gpu_number"]
+    device = torch.device(f'cuda:{gpu_number}' if torch.cuda.is_available() else 'cpu')
+except KeyError:
+    device = torch.device('cpu')
+
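+# import the load_data function of the configured dataset dynamically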
+load_data = importlib.import_module(f'EIVData.{long_dataname}').load_data
 
 # reproducability
+seed_list = range(seed_range[0], seed_range[1])
+
 def set_seeds(seed):
     torch.backends.cudnn.benchmark = False
     np.random.seed(seed)
     random.seed(seed) 
     torch.manual_seed(seed)
-seed_list = range(10)
 
 # to store the RMSE
 rmse_chain = []
@@ -59,8 +79,26 @@ class UpdatedTrainEpoch(train_and_store.TrainEpoch):
         Overwrites the corresponding method
         """
         if epoch >= epoch_offset:
-            net.std_y_par.requires_grad = True
-        self.lr_scheduler.step() 
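+            # estimate the current train RMSE from a few batches and use it
+            # as the new value of std_y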
+            pred_collection = []
+            y_collection = []
+            for i, (x,y) in  enumerate(self.train_dataloader):
+                if i >= eiv_prediction_number_of_batches:
+                    break
+                if len(y.shape) <= 1:
+                    y = y.view((-1,1))
+                x,y = x.to(device), y.to(device)
+                pred, _ = net.predict(x,
+                        number_of_draws=eiv_prediction_number_of_draws,
+                        remove_graph=True,
+                        take_average_of_prediction=True)
+                pred_collection.append(pred)
+                y_collection.append(y)
+            pred_collection = torch.cat(pred_collection, dim=0)
+            y_collection = torch.cat(y_collection, dim=0)
+            assert pred_collection.shape == y_collection.shape
+            rmse = torch.sqrt(torch.mean((pred_collection - y_collection)**2))
+            net.change_std_y(rmse)
+        self.lr_scheduler.step()
 
     def extra_report(self, net, i):
         """
@@ -132,7 +170,7 @@ def train_on_data(init_std_y, seed):
             lr=lr, reg=reg, report_point=report_point, device=device)
     # run and save
     save_file = os.path.join('saved_networks',
-            f'eiv_california'\
+            f'eiv_{short_dataname}'\
                     f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
                     f'_p_{p:.2f}_fixed_std_x_{fixed_std_x:.3f}'\
                     f'_seed_{seed}.pkl')
@@ -146,7 +184,7 @@ if __name__ == '__main__':
     for seed in seed_list:
         # Tensorboard monitoring
         writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
-                f'run_eiv_california_lr_{lr:.4f}_seed'\
+                f'run_eiv_{short_dataname}_lr_{lr:.4f}_seed'\
                 f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}'\
                 f'_fixed_std_x_{fixed_std_x:.3f}')
         print(f'>>>>SEED: {seed}')
diff --git a/Experiments/train_eiv_concrete.py b/Experiments/train_eiv_concrete.py
deleted file mode 100644
index e637d359d9a7eed1dd87f40d3dfd8709040298cf..0000000000000000000000000000000000000000
--- a/Experiments/train_eiv_concrete.py
+++ /dev/null
@@ -1,157 +0,0 @@
-"""
-Train EiV model on concrete strength dataset using different seeds
-"""
-import random
-import os
-
-import numpy as np
-import torch
-import torch.backends.cudnn
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard.writer import SummaryWriter
-
-from EIVArchitectures import Networks, initialize_weights
-from EIVData.concrete_strength import load_data
-from EIVTrainingRoutines import train_and_store, loss_functions
-
-# hyperparameters
-lr = 1e-3
-batch_size = 32
-test_batch_size = 800
-number_of_epochs = 100
-unscaled_reg = 10
-report_point = 5
-p = 0.2
-lr_update = 20
-# pretraining = 300
-epoch_offset = 10
-init_std_y_list = [0.5]
-gamma = 0.5
-hidden_layers = [1024, 1024, 1024, 1024]
-device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
-fixed_std_x = 0.05
-
-# reproducability
-def set_seeds(seed):
-    torch.backends.cudnn.benchmark = False
-    np.random.seed(seed)
-    random.seed(seed) 
-    torch.manual_seed(seed)
-seed_list = range(10)
-
-# to store the RMSE
-rmse_chain = []
-
-class UpdatedTrainEpoch(train_and_store.TrainEpoch):
-    def pre_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch == 0:
-            self.lr = self.initial_lr
-            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
-            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
-            self.optimizer, lr_update, gamma)
-
-
-    def post_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch >= epoch_offset:
-            net.std_y_par.requires_grad = True
-        self.lr_scheduler.step() 
-
-    def extra_report(self, net, i):
-        """
-        Overwrites the corresponding method
-        and fed after initialization of this class
-        """
-        rmse = self.rmse(net).item()
-        rmse_chain.append(rmse)
-        writer.add_scalar('RMSE', rmse, self.total_count)
-        writer.add_scalar('std_y', self.last_std_y, self.total_count)
-        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
-        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
-        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
-        print(f'RMSE {rmse:.3f}')
-
-    def rmse(self, net):
-        """
-        Compute the root mean squared error for `net`
-        """
-        net_train_state = net.training
-        net_noise_state = net.noise_is_on
-        net.eval()
-        net.noise_off()
-        x, y = next(iter(self.test_dataloader))
-        if len(y.shape) <= 1:
-            y = y.view((-1,1))
-        out = net(x.to(device))[0].detach().cpu()
-        assert out.shape == y.shape
-        if net_train_state:
-            net.train()
-        if net_noise_state:
-            net.noise_on()
-        return torch.sqrt(torch.mean((out-y)**2))
-
-def train_on_data(init_std_y, seed):
-    """
-    Sets `seed`, loads data and trains an Bernoulli Modell, starting with
-    `init_std_y`.
-    """
-    # set seed
-    set_seeds(seed)
-    # load Datasets
-    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
-            normalize=True)
-    # make dataloaders
-    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
-            shuffle=True)
-    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
-            shuffle=True)
-    # create a net
-    input_dim = train_data[0][0].numel()
-    output_dim = train_data[0][1].numel()
-    net = Networks.FNNEIV(p=p,
-            init_std_y=init_std_y,
-            h=[input_dim, *hidden_layers, output_dim],
-            fixed_std_x=fixed_std_x)
-    net.apply(initialize_weights.glorot_init)
-    net = net.to(device)
-    net.std_y_par.requires_grad = False
-    std_x_map = lambda: net.get_std_x().detach().cpu().item()
-    std_y_map = lambda: net.get_std_y().detach().cpu().item()
-    # regularization
-    reg = unscaled_reg/len(train_data)
-    # create epoch_map
-    criterion = loss_functions.nll_eiv
-    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
-            test_dataloader=test_dataloader,
-            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
-            lr=lr, reg=reg, report_point=report_point, device=device)
-    # run and save
-    save_file = os.path.join('saved_networks',
-            f'eiv_concrete'\
-                    f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
-                    f'_p_{p:.2f}_fixed_std_x_{fixed_std_x:.3f}'\
-                    f'_seed_{seed}.pkl')
-    train_and_store.train_and_store(net=net, 
-            epoch_map=epoch_map,
-            number_of_epochs=number_of_epochs,
-            save_file=save_file)
-    
-
-if __name__ == '__main__':
-    for seed in seed_list:
-        # Tensorboard monitoring
-        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
-                f'run_eiv_concrete_lr_{lr:.4f}_seed'\
-                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}'\
-                f'_fixed_std_x_{fixed_std_x:.3f}')
-        print(f'>>>>SEED: {seed}')
-        for init_std_y in init_std_y_list:
-            print(f'Using init_std_y={init_std_y:.3f}')
-            train_on_data(init_std_y, seed)
-
-
diff --git a/Experiments/train_eiv_energy.py b/Experiments/train_eiv_energy.py
deleted file mode 100644
index 304501b089043b8ea701f088e9859b655171dbd2..0000000000000000000000000000000000000000
--- a/Experiments/train_eiv_energy.py
+++ /dev/null
@@ -1,157 +0,0 @@
-"""
-Train EiV model on the energy efficiency dataset using different seeds
-"""
-import random
-import os
-
-import numpy as np
-import torch
-import torch.backends.cudnn
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard.writer import SummaryWriter
-
-from EIVArchitectures import Networks, initialize_weights
-from EIVData.energy_efficiency import load_data
-from EIVTrainingRoutines import train_and_store, loss_functions
-
-# hyperparameters
-lr = 1e-3
-batch_size = 32
-test_batch_size = 600
-number_of_epochs = 600
-unscaled_reg = 10
-report_point = 5
-p = 0.2
-lr_update = 100
-# pretraining = 300
-epoch_offset = 100
-init_std_y_list = [0.5]
-gamma = 0.5
-hidden_layers = [1024, 1024, 1024, 1024]
-device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
-fixed_std_x = 0.05
-
-# reproducability
-def set_seeds(seed):
-    torch.backends.cudnn.benchmark = False
-    np.random.seed(seed)
-    random.seed(seed) 
-    torch.manual_seed(seed)
-seed_list = range(10)
-
-# to store the RMSE
-rmse_chain = []
-
-class UpdatedTrainEpoch(train_and_store.TrainEpoch):
-    def pre_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch == 0:
-            self.lr = self.initial_lr
-            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
-            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
-            self.optimizer, lr_update, gamma)
-
-
-    def post_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch >= epoch_offset:
-            net.std_y_par.requires_grad = True
-        self.lr_scheduler.step() 
-
-    def extra_report(self, net, i):
-        """
-        Overwrites the corresponding method
-        and fed after initialization of this class
-        """
-        rmse = self.rmse(net).item()
-        rmse_chain.append(rmse)
-        writer.add_scalar('RMSE', rmse, self.total_count)
-        writer.add_scalar('std_y', self.last_std_y, self.total_count)
-        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
-        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
-        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
-        print(f'RMSE {rmse:.3f}')
-
-    def rmse(self, net):
-        """
-        Compute the root mean squared error for `net`
-        """
-        net_train_state = net.training
-        net_noise_state = net.noise_is_on
-        net.eval()
-        net.noise_off()
-        x, y = next(iter(self.test_dataloader))
-        if len(y.shape) <= 1:
-            y = y.view((-1,1))
-        out = net(x.to(device))[0].detach().cpu()
-        assert out.shape == y.shape
-        if net_train_state:
-            net.train()
-        if net_noise_state:
-            net.noise_on()
-        return torch.sqrt(torch.mean((out-y)**2))
-
-def train_on_data(init_std_y, seed):
-    """
-    Sets `seed`, loads data and trains an Bernoulli Modell, starting with
-    `init_std_y`.
-    """
-    # set seed
-    set_seeds(seed)
-    # load Datasets
-    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
-            normalize=True)
-    # make dataloaders
-    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
-            shuffle=True)
-    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
-            shuffle=True)
-    # create a net
-    input_dim = train_data[0][0].numel()
-    output_dim = train_data[0][1].numel()
-    net = Networks.FNNEIV(p=p,
-            init_std_y=init_std_y,
-            h=[input_dim, *hidden_layers, output_dim],
-            fixed_std_x=fixed_std_x)
-    net.apply(initialize_weights.glorot_init)
-    net = net.to(device)
-    net.std_y_par.requires_grad = False
-    std_x_map = lambda: net.get_std_x().detach().cpu().item()
-    std_y_map = lambda: net.get_std_y().detach().cpu().item()
-    # regularization
-    reg = unscaled_reg/len(train_data)
-    # create epoch_map
-    criterion = loss_functions.nll_eiv
-    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
-            test_dataloader=test_dataloader,
-            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
-            lr=lr, reg=reg, report_point=report_point, device=device)
-    # run and save
-    save_file = os.path.join('saved_networks',
-            f'eiv_energy'\
-                    f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
-                    f'_p_{p:.2f}_fixed_std_x_{fixed_std_x:.3f}'\
-                    f'_seed_{seed}.pkl')
-    train_and_store.train_and_store(net=net, 
-            epoch_map=epoch_map,
-            number_of_epochs=number_of_epochs,
-            save_file=save_file)
-    
-
-if __name__ == '__main__':
-    for seed in seed_list:
-        # Tensorboard monitoring
-        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
-                f'run_eiv_energy_lr_{lr:.4f}_seed'\
-                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}'\
-                f'_fixed_std_x_{fixed_std_x:.3f}')
-        print(f'>>>>SEED: {seed}')
-        for init_std_y in init_std_y_list:
-            print(f'Using init_std_y={init_std_y:.3f}')
-            train_on_data(init_std_y, seed)
-
-
diff --git a/Experiments/train_eiv_kin8nm.py b/Experiments/train_eiv_kin8nm.py
deleted file mode 100644
index 96b6c79c9316978e656ba901ff2f3fd6b0814026..0000000000000000000000000000000000000000
--- a/Experiments/train_eiv_kin8nm.py
+++ /dev/null
@@ -1,157 +0,0 @@
-"""
-Train EiV model on the kin8nm dataset using different seeds
-"""
-import random
-import os
-
-import numpy as np
-import torch
-import torch.backends.cudnn
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard.writer import SummaryWriter
-
-from EIVArchitectures import Networks, initialize_weights
-from EIVData.kin8nm import load_data
-from EIVTrainingRoutines import train_and_store, loss_functions
-
-# hyperparameters
-lr = 1e-3
-batch_size = 32
-test_batch_size = 600
-number_of_epochs = 30
-unscaled_reg = 10
-report_point = 5
-p = 0.2
-lr_update = 20
-# pretraining = 300
-epoch_offset = 19
-init_std_y_list = [0.5]
-gamma = 0.5
-hidden_layers = [1024, 1024, 1024, 1024]
-device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
-fixed_std_x = 0.05
-
-# reproducability
-def set_seeds(seed):
-    torch.backends.cudnn.benchmark = False
-    np.random.seed(seed)
-    random.seed(seed) 
-    torch.manual_seed(seed)
-seed_list = range(10)
-
-# to store the RMSE
-rmse_chain = []
-
-class UpdatedTrainEpoch(train_and_store.TrainEpoch):
-    def pre_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch == 0:
-            self.lr = self.initial_lr
-            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
-            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
-            self.optimizer, lr_update, gamma)
-
-
-    def post_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch >= epoch_offset:
-            net.std_y_par.requires_grad = True
-        self.lr_scheduler.step() 
-
-    def extra_report(self, net, i):
-        """
-        Overwrites the corresponding method
-        and fed after initialization of this class
-        """
-        rmse = self.rmse(net).item()
-        rmse_chain.append(rmse)
-        writer.add_scalar('RMSE', rmse, self.total_count)
-        writer.add_scalar('std_y', self.last_std_y, self.total_count)
-        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
-        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
-        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
-        print(f'RMSE {rmse:.3f}')
-
-    def rmse(self, net):
-        """
-        Compute the root mean squared error for `net`
-        """
-        net_train_state = net.training
-        net_noise_state = net.noise_is_on
-        net.eval()
-        net.noise_off()
-        x, y = next(iter(self.test_dataloader))
-        if len(y.shape) <= 1:
-            y = y.view((-1,1))
-        out = net(x.to(device))[0].detach().cpu()
-        assert out.shape == y.shape
-        if net_train_state:
-            net.train()
-        if net_noise_state:
-            net.noise_on()
-        return torch.sqrt(torch.mean((out-y)**2))
-
-def train_on_data(init_std_y, seed):
-    """
-    Sets `seed`, loads data and trains an Bernoulli Modell, starting with
-    `init_std_y`.
-    """
-    # set seed
-    set_seeds(seed)
-    # load Datasets
-    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
-            normalize=True)
-    # make dataloaders
-    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
-            shuffle=True)
-    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
-            shuffle=True)
-    # create a net
-    input_dim = train_data[0][0].numel()
-    output_dim = train_data[0][1].numel()
-    net = Networks.FNNEIV(p=p,
-            init_std_y=init_std_y,
-            h=[input_dim, *hidden_layers, output_dim],
-            fixed_std_x=fixed_std_x)
-    net.apply(initialize_weights.glorot_init)
-    net = net.to(device)
-    net.std_y_par.requires_grad = False
-    std_x_map = lambda: net.get_std_x().detach().cpu().item()
-    std_y_map = lambda: net.get_std_y().detach().cpu().item()
-    # regularization
-    reg = unscaled_reg/len(train_data)
-    # create epoch_map
-    criterion = loss_functions.nll_eiv
-    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
-            test_dataloader=test_dataloader,
-            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
-            lr=lr, reg=reg, report_point=report_point, device=device)
-    # run and save
-    save_file = os.path.join('saved_networks',
-            f'eiv_kin8nm'\
-                    f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
-                    f'_p_{p:.2f}_fixed_std_x_{fixed_std_x:.3f}'\
-                    f'_seed_{seed}.pkl')
-    train_and_store.train_and_store(net=net, 
-            epoch_map=epoch_map,
-            number_of_epochs=number_of_epochs,
-            save_file=save_file)
-    
-
-if __name__ == '__main__':
-    for seed in seed_list:
-        # Tensorboard monitoring
-        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
-                f'run_eiv_kin8nm_lr_{lr:.4f}_seed'\
-                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}'\
-                f'_fixed_std_x_{fixed_std_x:.3f}')
-        print(f'>>>>SEED: {seed}')
-        for init_std_y in init_std_y_list:
-            print(f'Using init_std_y={init_std_y:.3f}')
-            train_on_data(init_std_y, seed)
-
-
diff --git a/Experiments/train_eiv_msd.py b/Experiments/train_eiv_msd.py
deleted file mode 100644
index 4b533b691d5c052d2e28a43eac3c89caca1e70fa..0000000000000000000000000000000000000000
--- a/Experiments/train_eiv_msd.py
+++ /dev/null
@@ -1,157 +0,0 @@
-"""
-Train EiV model on the million song dataset using different seeds
-"""
-import random
-import os
-
-import numpy as np
-import torch
-import torch.backends.cudnn
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard.writer import SummaryWriter
-
-from EIVArchitectures import Networks, initialize_weights
-from EIVData.million_song import load_data
-from EIVTrainingRoutines import train_and_store, loss_functions
-
-# hyperparameters
-lr = 1e-3
-batch_size = 100
-test_batch_size = 600
-number_of_epochs = 10
-unscaled_reg = 10
-report_point = 5
-p = 0.2
-lr_update = 4
-# pretraining = 300
-epoch_offset = 4
-init_std_y_list = [0.5]
-gamma = 0.5
-hidden_layers = [1024, 1024, 1024, 1024]
-device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
-fixed_std_x = 0.05
-
-# reproducability
-def set_seeds(seed):
-    torch.backends.cudnn.benchmark = False
-    np.random.seed(seed)
-    random.seed(seed) 
-    torch.manual_seed(seed)
-seed_list = range(10)
-
-# to store the RMSE
-rmse_chain = []
-
-class UpdatedTrainEpoch(train_and_store.TrainEpoch):
-    def pre_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch == 0:
-            self.lr = self.initial_lr
-            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
-            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
-            self.optimizer, lr_update, gamma)
-
-
-    def post_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch >= epoch_offset:
-            net.std_y_par.requires_grad = True
-        self.lr_scheduler.step() 
-
-    def extra_report(self, net, i):
-        """
-        Overwrites the corresponding method
-        and fed after initialization of this class
-        """
-        rmse = self.rmse(net).item()
-        rmse_chain.append(rmse)
-        writer.add_scalar('RMSE', rmse, self.total_count)
-        writer.add_scalar('std_y', self.last_std_y, self.total_count)
-        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
-        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
-        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
-        print(f'RMSE {rmse:.3f}')
-
-    def rmse(self, net):
-        """
-        Compute the root mean squared error for `net`
-        """
-        net_train_state = net.training
-        net_noise_state = net.noise_is_on
-        net.eval()
-        net.noise_off()
-        x, y = next(iter(self.test_dataloader))
-        if len(y.shape) <= 1:
-            y = y.view((-1,1))
-        out = net(x.to(device))[0].detach().cpu()
-        assert out.shape == y.shape
-        if net_train_state:
-            net.train()
-        if net_noise_state:
-            net.noise_on()
-        return torch.sqrt(torch.mean((out-y)**2))
-
-def train_on_data(init_std_y, seed):
-    """
-    Sets `seed`, loads data and trains an Bernoulli Modell, starting with
-    `init_std_y`.
-    """
-    # set seed
-    set_seeds(seed)
-    # load Datasets
-    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
-            normalize=True)
-    # make dataloaders
-    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
-            shuffle=True)
-    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
-            shuffle=True)
-    # create a net
-    input_dim = train_data[0][0].numel()
-    output_dim = train_data[0][1].numel()
-    net = Networks.FNNEIV(p=p,
-            init_std_y=init_std_y,
-            h=[input_dim, *hidden_layers, output_dim],
-            fixed_std_x=fixed_std_x)
-    net.apply(initialize_weights.glorot_init)
-    net = net.to(device)
-    net.std_y_par.requires_grad = False
-    std_x_map = lambda: net.get_std_x().detach().cpu().item()
-    std_y_map = lambda: net.get_std_y().detach().cpu().item()
-    # regularization
-    reg = unscaled_reg/len(train_data)
-    # create epoch_map
-    criterion = loss_functions.nll_eiv
-    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
-            test_dataloader=test_dataloader,
-            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
-            lr=lr, reg=reg, report_point=report_point, device=device)
-    # run and save
-    save_file = os.path.join('saved_networks',
-            f'eiv_msd'\
-                    f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
-                    f'_p_{p:.2f}_fixed_std_x_{fixed_std_x:.3f}'\
-                    f'_seed_{seed}.pkl')
-    train_and_store.train_and_store(net=net, 
-            epoch_map=epoch_map,
-            number_of_epochs=number_of_epochs,
-            save_file=save_file)
-    
-
-if __name__ == '__main__':
-    for seed in seed_list:
-        # Tensorboard monitoring
-        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
-                f'run_eiv_msd_lr_{lr:.4f}_seed'\
-                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}'\
-                f'_fixed_std_x_{fixed_std_x:.3f}')
-        print(f'>>>>SEED: {seed}')
-        for init_std_y in init_std_y_list:
-            print(f'Using init_std_y={init_std_y:.3f}')
-            train_on_data(init_std_y, seed)
-
-
diff --git a/Experiments/train_eiv_naval.py b/Experiments/train_eiv_naval.py
deleted file mode 100644
index f4a9aa75a4b8e00dce105764544b2b2a9d9ad192..0000000000000000000000000000000000000000
--- a/Experiments/train_eiv_naval.py
+++ /dev/null
@@ -1,157 +0,0 @@
-"""
-Train EiV model on the naval propulsion dataset using different seeds
-"""
-import random
-import os
-
-import numpy as np
-import torch
-import torch.backends.cudnn
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard.writer import SummaryWriter
-
-from EIVArchitectures import Networks, initialize_weights
-from EIVData.naval_propulsion import load_data
-from EIVTrainingRoutines import train_and_store, loss_functions
-
-# hyperparameters
-lr = 1e-3
-batch_size = 32
-test_batch_size = 600
-number_of_epochs = 30
-unscaled_reg = 10
-report_point = 5
-p = 0.2
-lr_update = 20
-# pretraining = 300
-epoch_offset = 20
-init_std_y_list = [0.5]
-gamma = 0.5
-hidden_layers = [1024, 1024, 1024, 1024]
-device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
-fixed_std_x = 0.05
-
-# reproducability
-def set_seeds(seed):
-    torch.backends.cudnn.benchmark = False
-    np.random.seed(seed)
-    random.seed(seed) 
-    torch.manual_seed(seed)
-seed_list = range(10)
-
-# to store the RMSE
-rmse_chain = []
-
-class UpdatedTrainEpoch(train_and_store.TrainEpoch):
-    def pre_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch == 0:
-            self.lr = self.initial_lr
-            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
-            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
-            self.optimizer, lr_update, gamma)
-
-
-    def post_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch >= epoch_offset:
-            net.std_y_par.requires_grad = True
-        self.lr_scheduler.step() 
-
-    def extra_report(self, net, i):
-        """
-        Overwrites the corresponding method
-        and fed after initialization of this class
-        """
-        rmse = self.rmse(net).item()
-        rmse_chain.append(rmse)
-        writer.add_scalar('RMSE', rmse, self.total_count)
-        writer.add_scalar('std_y', self.last_std_y, self.total_count)
-        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
-        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
-        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
-        print(f'RMSE {rmse:.3f}')
-
-    def rmse(self, net):
-        """
-        Compute the root mean squared error for `net`
-        """
-        net_train_state = net.training
-        net_noise_state = net.noise_is_on
-        net.eval()
-        net.noise_off()
-        x, y = next(iter(self.test_dataloader))
-        if len(y.shape) <= 1:
-            y = y.view((-1,1))
-        out = net(x.to(device))[0].detach().cpu()
-        assert out.shape == y.shape
-        if net_train_state:
-            net.train()
-        if net_noise_state:
-            net.noise_on()
-        return torch.sqrt(torch.mean((out-y)**2))
-
-def train_on_data(init_std_y, seed):
-    """
-    Sets `seed`, loads data and trains an Bernoulli Modell, starting with
-    `init_std_y`.
-    """
-    # set seed
-    set_seeds(seed)
-    # load Datasets
-    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
-            normalize=True)
-    # make dataloaders
-    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
-            shuffle=True)
-    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
-            shuffle=True)
-    # create a net
-    input_dim = train_data[0][0].numel()
-    output_dim = train_data[0][1].numel()
-    net = Networks.FNNEIV(p=p,
-            init_std_y=init_std_y,
-            h=[input_dim, *hidden_layers, output_dim],
-            fixed_std_x=fixed_std_x)
-    net.apply(initialize_weights.glorot_init)
-    net = net.to(device)
-    net.std_y_par.requires_grad = False
-    std_x_map = lambda: net.get_std_x().detach().cpu().item()
-    std_y_map = lambda: net.get_std_y().detach().cpu().item()
-    # regularization
-    reg = unscaled_reg/len(train_data)
-    # create epoch_map
-    criterion = loss_functions.nll_eiv
-    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
-            test_dataloader=test_dataloader,
-            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
-            lr=lr, reg=reg, report_point=report_point, device=device)
-    # run and save
-    save_file = os.path.join('saved_networks',
-            f'eiv_naval'\
-                    f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
-                    f'_p_{p:.2f}_fixed_std_x_{fixed_std_x:.3f}'\
-                    f'_seed_{seed}.pkl')
-    train_and_store.train_and_store(net=net, 
-            epoch_map=epoch_map,
-            number_of_epochs=number_of_epochs,
-            save_file=save_file)
-    
-
-if __name__ == '__main__':
-    for seed in seed_list:
-        # Tensorboard monitoring
-        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
-                f'run_eiv_naval_lr_{lr:.4f}_seed'\
-                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}'\
-                f'_fixed_std_x_{fixed_std_x:.3f}')
-        print(f'>>>>SEED: {seed}')
-        for init_std_y in init_std_y_list:
-            print(f'Using init_std_y={init_std_y:.3f}')
-            train_on_data(init_std_y, seed)
-
-
diff --git a/Experiments/train_eiv_power.py b/Experiments/train_eiv_power.py
deleted file mode 100644
index d9be09e4b7129599a41a784ed66e79f795fe67b8..0000000000000000000000000000000000000000
--- a/Experiments/train_eiv_power.py
+++ /dev/null
@@ -1,155 +0,0 @@
-"""
-Train EiV model on power plant dataset using different seeds
-"""
-import random
-import os
-
-import numpy as np
-import torch
-import torch.backends.cudnn
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard.writer import SummaryWriter
-
-from EIVArchitectures import Networks, initialize_weights
-from EIVData.power_plant import load_data
-from EIVTrainingRoutines import train_and_store, loss_functions
-
-# hyperparameters
-lr = 1e-3
-batch_size = 64
-test_batch_size = 600
-number_of_epochs = 35
-unscaled_reg = 10
-report_point = 5
-p = 0.2
-lr_update = 10
-# pretraining = 300
-epoch_offset = 15
-init_std_y_list = [0.5]
-gamma = 0.5
-hidden_layers = [1024, 1024, 1024, 1024]
-device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
-fixed_std_x = 0.05
-
-# reproducability
-def set_seeds(seed):
-    torch.backends.cudnn.benchmark = False
-    np.random.seed(seed)
-    random.seed(seed) 
-    torch.manual_seed(seed)
-seed_list = range(10)
-
-# to store the RMSE
-rmse_chain = []
-
-class UpdatedTrainEpoch(train_and_store.TrainEpoch):
-    def pre_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch == 0:
-            self.lr = self.initial_lr
-            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
-            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
-            self.optimizer, lr_update, gamma)
-
-
-    def post_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch >= epoch_offset:
-            net.std_y_par.requires_grad = True
-        self.lr_scheduler.step() 
-
-    def extra_report(self, net, i):
-        """
-        Overwrites the corresponding method
-        and fed after initialization of this class
-        """
-        rmse = self.rmse(net).item()
-        rmse_chain.append(rmse)
-        writer.add_scalar('RMSE', rmse, self.total_count)
-        writer.add_scalar('std_y', self.last_std_y, self.total_count)
-        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
-        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
-        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
-        print(f'RMSE {rmse:.3f}')
-
-    def rmse(self, net):
-        """
-        Compute the root mean squared error for `net`
-        """
-        net_train_state = net.training
-        net_noise_state = net.noise_is_on
-        net.eval()
-        net.noise_off()
-        x, y = next(iter(self.test_dataloader))
-        if len(y.shape) <= 1:
-            y = y.view((-1,1))
-        out = net(x.to(device))[0].detach().cpu()
-        assert out.shape == y.shape
-        if net_train_state:
-            net.train()
-        if net_noise_state:
-            net.noise_on()
-        return torch.sqrt(torch.mean((out-y)**2))
-
-def train_on_data(init_std_y, seed):
-    """
-    Sets `seed`, loads data and trains an EiV model, starting with
-    `init_std_y`.
-    """
-    # set seed
-    set_seeds(seed)
-    # load Datasets
-    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
-            normalize=True)
-    # make dataloaders
-    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
-            shuffle=True)
-    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
-            shuffle=True)
-    # create a net
-    input_dim = train_data[0][0].numel()
-    output_dim = train_data[0][1].numel()
-    net = Networks.FNNEIV(p=p,
-            init_std_y=init_std_y,
-            h=[input_dim, *hidden_layers, output_dim],
-            fixed_std_x=fixed_std_x)
-    net.apply(initialize_weights.glorot_init)
-    net = net.to(device)
-    net.std_y_par.requires_grad = False
-    std_x_map = lambda: net.get_std_x().detach().cpu().item()
-    std_y_map = lambda: net.get_std_y().detach().cpu().item()
-    # regularization
-    reg = unscaled_reg/len(train_data)
-    # create epoch_map
-    criterion = loss_functions.nll_eiv
-    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
-            test_dataloader=test_dataloader,
-            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
-            lr=lr, reg=reg, report_point=report_point, device=device)
-    # run and save
-    save_file = os.path.join('saved_networks',
-            f'eiv_power'\
-                    f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
-                    f'_p_{p:.2f}_fixed_std_x_{fixed_std_x:.3f}'\
-                    f'_seed_{seed}.pkl')
-    train_and_store.train_and_store(net=net, 
-            epoch_map=epoch_map,
-            number_of_epochs=number_of_epochs,
-            save_file=save_file)
-    
-
-if __name__ == '__main__':
-    for seed in seed_list:
-        # Tensorboard monitoring
-        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
-                f'run_eiv_power_lr_{lr:.4f}_seed'\
-                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}'\
-                f'_fixed_std_x_{fixed_std_x:.3f}')
-        print(f'>>>>SEED: {seed}')
-        for init_std_y in init_std_y_list:
-            print(f'Using init_std_y={init_std_y:.3f}')
-            train_on_data(init_std_y, seed)
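
Aside: the `rmse` helpers in these scripts switch the net into eval mode with
EiV noise off and then restore the previous state by hand. The same
save-and-restore can be wrapped in a context manager so the restore cannot be
forgotten; a sketch, assuming only the `training`/`noise_is_on` flags and the
`eval`/`train`/`noise_on`/`noise_off` methods used above:

    import contextlib

    @contextlib.contextmanager
    def eval_noise_off(net):
        # Temporarily put `net` into eval mode with EiV input noise
        # disabled; restore the previous training/noise state on exit.
        was_training = net.training
        noise_was_on = net.noise_is_on
        net.eval()
        net.noise_off()
        try:
            yield net
        finally:
            if was_training:
                net.train()
            if noise_was_on:
                net.noise_on()

With this, the body of `rmse` reduces to a `with eval_noise_off(net):` block
around the forward pass.
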
diff --git a/Experiments/train_eiv_protein.py b/Experiments/train_eiv_protein.py
deleted file mode 100644
index 625fb6a3620af1fd5aef227895ae39ebc7095c91..0000000000000000000000000000000000000000
--- a/Experiments/train_eiv_protein.py
+++ /dev/null
@@ -1,157 +0,0 @@
-"""
-Train EiV model on protein structure dataset using different seeds
-"""
-import random
-import os
-
-import numpy as np
-import torch
-import torch.backends.cudnn
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard.writer import SummaryWriter
-
-from EIVArchitectures import Networks, initialize_weights
-from EIVData.protein_structure import load_data
-from EIVTrainingRoutines import train_and_store, loss_functions
-
-# hyperparameters
-lr = 1e-3
-batch_size = 100
-test_batch_size = 600
-number_of_epochs = 30
-unscaled_reg = 10
-report_point = 5
-p = 0.2
-lr_update = 10
-# pretraining = 300
-epoch_offset = 10
-init_std_y_list = [0.5]
-gamma = 0.5
-hidden_layers = [1024, 1024, 1024, 1024]
-device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
-fixed_std_x = 0.05
-
-# reproducibility
-def set_seeds(seed):
-    torch.backends.cudnn.benchmark = False
-    np.random.seed(seed)
-    random.seed(seed) 
-    torch.manual_seed(seed)
-seed_list = range(10)
-
-# to store the RMSE
-rmse_chain = []
-
-class UpdatedTrainEpoch(train_and_store.TrainEpoch):
-    def pre_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch == 0:
-            self.lr = self.initial_lr
-            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
-            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
-            self.optimizer, lr_update, gamma)
-
-
-    def post_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch >= epoch_offset:
-            net.std_y_par.requires_grad = True
-        self.lr_scheduler.step() 
-
-    def extra_report(self, net, i):
-        """
-        Overwrites the corresponding method. Uses the module-level
-        `writer`, which is assigned after initialization of this class.
-        """
-        rmse = self.rmse(net).item()
-        rmse_chain.append(rmse)
-        writer.add_scalar('RMSE', rmse, self.total_count)
-        writer.add_scalar('std_y', self.last_std_y, self.total_count)
-        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
-        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
-        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
-        print(f'RMSE {rmse:.3f}')
-
-    def rmse(self, net):
-        """
-        Compute the root mean squared error for `net`
-        """
-        net_train_state = net.training
-        net_noise_state = net.noise_is_on
-        net.eval()
-        net.noise_off()
-        x, y = next(iter(self.test_dataloader))
-        if len(y.shape) <= 1:
-            y = y.view((-1,1))
-        out = net(x.to(device))[0].detach().cpu()
-        assert out.shape == y.shape
-        if net_train_state:
-            net.train()
-        if net_noise_state:
-            net.noise_on()
-        return torch.sqrt(torch.mean((out-y)**2))
-
-def train_on_data(init_std_y, seed):
-    """
-    Sets `seed`, loads data and trains an EiV model, starting with
-    `init_std_y`.
-    """
-    # set seed
-    set_seeds(seed)
-    # load Datasets
-    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
-            normalize=True)
-    # make dataloaders
-    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
-            shuffle=True)
-    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
-            shuffle=True)
-    # create a net
-    input_dim = train_data[0][0].numel()
-    output_dim = train_data[0][1].numel()
-    net = Networks.FNNEIV(p=p,
-            init_std_y=init_std_y,
-            h=[input_dim, *hidden_layers, output_dim],
-            fixed_std_x=fixed_std_x)
-    net.apply(initialize_weights.glorot_init)
-    net = net.to(device)
-    net.std_y_par.requires_grad = False
-    std_x_map = lambda: net.get_std_x().detach().cpu().item()
-    std_y_map = lambda: net.get_std_y().detach().cpu().item()
-    # regularization
-    reg = unscaled_reg/len(train_data)
-    # create epoch_map
-    criterion = loss_functions.nll_eiv
-    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
-            test_dataloader=test_dataloader,
-            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
-            lr=lr, reg=reg, report_point=report_point, device=device)
-    # run and save
-    save_file = os.path.join('saved_networks',
-            f'eiv_protein'\
-                    f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
-                    f'_p_{p:.2f}_fixed_std_x_{fixed_std_x:.3f}'\
-                    f'_seed_{seed}.pkl')
-    train_and_store.train_and_store(net=net, 
-            epoch_map=epoch_map,
-            number_of_epochs=number_of_epochs,
-            save_file=save_file)
-    
-
-if __name__ == '__main__':
-    for seed in seed_list:
-        # Tensorboard monitoring
-        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
-                f'run_eiv_protein_lr_{lr:.4f}_seed'\
-                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}'\
-                f'_fixed_std_x_{fixed_std_x:.3f}')
-        print(f'>>>>SEED: {seed}')
-        for init_std_y in init_std_y_list:
-            print(f'Using init_std_y={init_std_y:.3f}')
-            train_on_data(init_std_y, seed)
-
-
diff --git a/Experiments/train_eiv_wine.py b/Experiments/train_eiv_wine.py
deleted file mode 100644
index d6c9a41f9737005330c6aca56bf060a94a71ad56..0000000000000000000000000000000000000000
--- a/Experiments/train_eiv_wine.py
+++ /dev/null
@@ -1,157 +0,0 @@
-"""
-Train EiV model on wine quality dataset using different seeds
-"""
-import random
-import os
-
-import numpy as np
-import torch
-import torch.backends.cudnn
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard.writer import SummaryWriter
-
-from EIVArchitectures import Networks, initialize_weights
-from EIVData.wine_quality import load_data
-from EIVTrainingRoutines import train_and_store, loss_functions
-
-# hyperparameters
-lr = 1e-3
-batch_size = 32
-test_batch_size = 800
-number_of_epochs = 100
-unscaled_reg = 10
-report_point = 5
-p = 0.2
-lr_update = 30
-# pretraining = 300
-epoch_offset = 50
-init_std_y_list = [0.5]
-gamma = 0.5
-hidden_layers = [1024, 1024, 1024, 1024]
-device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
-fixed_std_x = 0.05
-
-# reproducibility
-def set_seeds(seed):
-    torch.backends.cudnn.benchmark = False
-    np.random.seed(seed)
-    random.seed(seed) 
-    torch.manual_seed(seed)
-seed_list = range(10)
-
-# to store the RMSE
-rmse_chain = []
-
-class UpdatedTrainEpoch(train_and_store.TrainEpoch):
-    def pre_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch == 0:
-            self.lr = self.initial_lr
-            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
-            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
-            self.optimizer, lr_update, gamma)
-
-
-    def post_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch >= epoch_offset:
-            net.std_y_par.requires_grad = True
-        self.lr_scheduler.step() 
-
-    def extra_report(self, net, i):
-        """
-        Overwrites the corresponding method. Uses the module-level
-        `writer`, which is assigned after initialization of this class.
-        """
-        rmse = self.rmse(net).item()
-        rmse_chain.append(rmse)
-        writer.add_scalar('RMSE', rmse, self.total_count)
-        writer.add_scalar('std_y', self.last_std_y, self.total_count)
-        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
-        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
-        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
-        print(f'RMSE {rmse:.3f}')
-
-    def rmse(self, net):
-        """
-        Compute the root mean squared error for `net`
-        """
-        net_train_state = net.training
-        net_noise_state = net.noise_is_on
-        net.eval()
-        net.noise_off()
-        x, y = next(iter(self.test_dataloader))
-        if len(y.shape) <= 1:
-            y = y.view((-1,1))
-        out = net(x.to(device))[0].detach().cpu()
-        assert out.shape == y.shape
-        if net_train_state:
-            net.train()
-        if net_noise_state:
-            net.noise_on()
-        return torch.sqrt(torch.mean((out-y)**2))
-
-def train_on_data(init_std_y, seed):
-    """
-    Sets `seed`, loads data and trains an EiV model, starting with
-    `init_std_y`.
-    """
-    # set seed
-    set_seeds(seed)
-    # load Datasets
-    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
-            normalize=True)
-    # make dataloaders
-    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
-            shuffle=True)
-    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
-            shuffle=True)
-    # create a net
-    input_dim = train_data[0][0].numel()
-    output_dim = train_data[0][1].numel()
-    net = Networks.FNNEIV(p=p,
-            init_std_y=init_std_y,
-            h=[input_dim, *hidden_layers, output_dim],
-            fixed_std_x=fixed_std_x)
-    net.apply(initialize_weights.glorot_init)
-    net = net.to(device)
-    net.std_y_par.requires_grad = False
-    std_x_map = lambda: net.get_std_x().detach().cpu().item()
-    std_y_map = lambda: net.get_std_y().detach().cpu().item()
-    # regularization
-    reg = unscaled_reg/len(train_data)
-    # create epoch_map
-    criterion = loss_functions.nll_eiv
-    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
-            test_dataloader=test_dataloader,
-            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
-            lr=lr, reg=reg, report_point=report_point, device=device)
-    # run and save
-    save_file = os.path.join('saved_networks',
-            f'eiv_wine'\
-                    f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
-                    f'_p_{p:.2f}_fixed_std_x_{fixed_std_x:.3f}'\
-                    f'_seed_{seed}.pkl')
-    train_and_store.train_and_store(net=net, 
-            epoch_map=epoch_map,
-            number_of_epochs=number_of_epochs,
-            save_file=save_file)
-    
-
-if __name__ == '__main__':
-    for seed in seed_list:
-        # Tensorboard monitoring
-        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
-                f'run_eiv_wine_lr_{lr:.4f}_seed'\
-                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}'\
-                f'_fixed_std_x_{fixed_std_x:.3f}')
-        print(f'>>>>SEED: {seed}')
-        for init_std_y in init_std_y_list:
-            print(f'Using init_std_y={init_std_y:.3f}')
-            train_on_data(init_std_y, seed)
-
-
diff --git a/Experiments/train_eiv_yacht.py b/Experiments/train_eiv_yacht.py
deleted file mode 100644
index 10e2607ea151ff071fd1ca8ebee8e8ccc6885b6a..0000000000000000000000000000000000000000
--- a/Experiments/train_eiv_yacht.py
+++ /dev/null
@@ -1,155 +0,0 @@
-"""
-Train EiV model on the yacht hydrodynamics dataset using different seeds
-"""
-import random
-import os
-
-import numpy as np
-import torch
-import torch.backends.cudnn
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard.writer import SummaryWriter
-
-from EIVArchitectures import Networks, initialize_weights
-from EIVData.yacht_hydrodynamics import load_data
-from EIVTrainingRoutines import train_and_store, loss_functions
-
-# hyperparameters
-lr = 1e-3
-batch_size = 32
-test_batch_size = 600
-number_of_epochs = 1200
-unscaled_reg = 10
-report_point = 5
-p = 0.2
-lr_update = 200
-# pretraining = 300
-epoch_offset = 250
-init_std_y_list = [0.5]
-gamma = 0.5
-hidden_layers = [1024, 1024, 1024, 1024]
-device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
-fixed_std_x = 0.05
-
-# reproducibility
-def set_seeds(seed):
-    torch.backends.cudnn.benchmark = False
-    np.random.seed(seed)
-    random.seed(seed) 
-    torch.manual_seed(seed)
-seed_list = range(10)
-
-# to store the RMSE
-rmse_chain = []
-
-class UpdatedTrainEpoch(train_and_store.TrainEpoch):
-    def pre_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch == 0:
-            self.lr = self.initial_lr
-            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
-            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
-            self.optimizer, lr_update, gamma)
-
-
-    def post_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch >= epoch_offset:
-            net.std_y_par.requires_grad = True
-        self.lr_scheduler.step() 
-
-    def extra_report(self, net, i):
-        """
-        Overwrites the corresponding method. Uses the module-level
-        `writer`, which is assigned after initialization of this class.
-        """
-        rmse = self.rmse(net).item()
-        rmse_chain.append(rmse)
-        writer.add_scalar('RMSE', rmse, self.total_count)
-        writer.add_scalar('std_y', self.last_std_y, self.total_count)
-        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
-        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
-        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
-        print(f'RMSE {rmse:.3f}')
-
-    def rmse(self, net):
-        """
-        Compute the root mean squared error for `net`
-        """
-        net_train_state = net.training
-        net_noise_state = net.noise_is_on
-        net.eval()
-        net.noise_off()
-        x, y = next(iter(self.test_dataloader))
-        if len(y.shape) <= 1:
-            y = y.view((-1,1))
-        out = net(x.to(device))[0].detach().cpu()
-        assert out.shape == y.shape
-        if net_train_state:
-            net.train()
-        if net_noise_state:
-            net.noise_on()
-        return torch.sqrt(torch.mean((out-y)**2))
-
-def train_on_data(init_std_y, seed):
-    """
-    Sets `seed`, loads data and trains an EiV model, starting with
-    `init_std_y`.
-    """
-    # set seed
-    set_seeds(seed)
-    # load Datasets
-    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
-            normalize=True)
-    # make dataloaders
-    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
-            shuffle=True)
-    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
-            shuffle=True)
-    # create a net
-    input_dim = train_data[0][0].numel()
-    output_dim = train_data[0][1].numel()
-    net = Networks.FNNEIV(p=p,
-            init_std_y=init_std_y,
-            h=[input_dim, *hidden_layers, output_dim],
-            fixed_std_x=fixed_std_x)
-    net.apply(initialize_weights.glorot_init)
-    net = net.to(device)
-    net.std_y_par.requires_grad = False
-    std_x_map = lambda: net.get_std_x().detach().cpu().item()
-    std_y_map = lambda: net.get_std_y().detach().cpu().item()
-    # regularization
-    reg = unscaled_reg/len(train_data)
-    # create epoch_map
-    criterion = loss_functions.nll_eiv
-    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
-            test_dataloader=test_dataloader,
-            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
-            lr=lr, reg=reg, report_point=report_point, device=device)
-    # run and save
-    save_file = os.path.join('saved_networks',
-            f'eiv_yacht'\
-                    f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
-                    f'_p_{p:.2f}_fixed_std_x_{fixed_std_x:.3f}'\
-                    f'_seed_{seed}.pkl')
-    train_and_store.train_and_store(net=net, 
-            epoch_map=epoch_map,
-            number_of_epochs=number_of_epochs,
-            save_file=save_file)
-    
-
-if __name__ == '__main__':
-    for seed in seed_list:
-        # Tensorboard monitoring
-        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
-                f'run_eiv_yacht_lr_{lr:.4f}_seed'\
-                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}'\
-                f'_fixed_std_x_{fixed_std_x:.3f}')
-        print(f'>>>>SEED: {seed}')
-        for init_std_y in init_std_y_list:
-            print(f'Using init_std_y={init_std_y:.3f}')
-            train_on_data(init_std_y, seed)
diff --git a/Experiments/train_noneiv_california.py b/Experiments/train_noneiv.py
similarity index 62%
rename from Experiments/train_noneiv_california.py
rename to Experiments/train_noneiv.py
index f359b4303aff0536ae52320c3e138478ac5da2ea..3d741b30e632f8c7232da70c3eb91dbfb4210734 100644
--- a/Experiments/train_noneiv_california.py
+++ b/Experiments/train_noneiv.py
@@ -1,8 +1,10 @@
 """
-Train non-EiV model on california housing dataset using different seeds
+Train non-EiV model using different seeds
 """
 import random
+import importlib
 import os
+import json
 
 import numpy as np
 import torch
@@ -11,32 +13,50 @@ from torch.utils.data import DataLoader
 from torch.utils.tensorboard.writer import SummaryWriter
 
 from EIVArchitectures import Networks, initialize_weights
-from EIVData.california_housing import load_data
 from EIVTrainingRoutines import train_and_store, loss_functions
 
-# hyperparameters
-lr = 1e-3
-batch_size = 200
-test_batch_size = 800
-number_of_epochs = 100
-unscaled_reg = 10
-report_point = 5
-p = 0.1
-lr_update = 20
-# pretraining = 300
-epoch_offset = 10
-init_std_y_list = [0.5]
-gamma = 0.5
-hidden_layers = [1024, 1024, 1024, 1024]
-device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
+# dataset key; selects configurations/noneiv_<data>.json below
+data = 'california'
+
+# load hyperparameters from JSON file
+with open(os.path.join('configurations', f'noneiv_{data}.json'), 'r') as conf_file:
+    conf_dict = json.load(conf_file)
+
+long_dataname = conf_dict["long_dataname"]
+short_dataname = conf_dict["short_dataname"]
+lr = conf_dict["lr"]
+batch_size = conf_dict["batch_size"]
+test_batch_size = conf_dict["test_batch_size"]
+number_of_epochs = conf_dict["number_of_epochs"]
+unscaled_reg = conf_dict["unscaled_reg"]
+report_point = conf_dict["report_point"]
+p = conf_dict["p"]
+lr_update = conf_dict["lr_update"]
+# number of epochs to wait before sigma_y is updated after each epoch
+epoch_offset = conf_dict["epoch_offset"]
+# used to estimate the train RMSE, to which sigma_y is then set
+noneiv_prediction_number_of_draws = conf_dict["noneiv_prediction_number_of_draws"]
+noneiv_prediction_number_of_batches = conf_dict["noneiv_prediction_number_of_batches"]
+init_std_y_list = conf_dict["init_std_y_list"]
+gamma = conf_dict["gamma"]
+hidden_layers = conf_dict["hidden_layers"]
+seed_range = conf_dict['seed_range']
+
+# use the configured GPU if available; fall back to CPU when no
+# gpu_number is configured
+try:
+    gpu_number = conf_dict["gpu_number"]
+    device = torch.device(f'cuda:{gpu_number}' if torch.cuda.is_available() else 'cpu')
+except KeyError:
+    device = torch.device('cpu')
+
+load_data = importlib.import_module(f'EIVData.{long_dataname}').load_data
 
 # reproducibility
+seed_list = range(seed_range[0], seed_range[1])
+
 def set_seeds(seed):
     torch.backends.cudnn.benchmark = False
     np.random.seed(seed)
     random.seed(seed) 
     torch.manual_seed(seed)
-seed_list = range(10)
 
 # to store the RMSE
 rmse_chain = []
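
For orientation, the configuration file read above has to provide every key
accessed from `conf_dict`. A minimal sketch that writes such a file: the
values mirror the hyperparameters previously hardcoded for the california
housing dataset, while the two `noneiv_prediction_*` entries and the
`gpu_number` default are illustrative assumptions, not the committed
configuration:

    # Sketch of configurations/noneiv_california.json, covering every key
    # the script reads. Values mirror the previously hardcoded
    # hyperparameters; the noneiv_prediction_* entries are assumptions.
    import json
    import os

    conf = {
        "long_dataname": "california_housing",  # module name in EIVData
        "short_dataname": "california",         # used in file names
        "lr": 1e-3,
        "batch_size": 200,
        "test_batch_size": 800,
        "number_of_epochs": 100,
        "unscaled_reg": 10,
        "report_point": 5,
        "p": 0.1,
        "lr_update": 20,
        "epoch_offset": 10,
        "noneiv_prediction_number_of_draws": 100,   # assumption
        "noneiv_prediction_number_of_batches": 10,  # assumption
        "init_std_y_list": [0.5],
        "gamma": 0.5,
        "hidden_layers": [1024, 1024, 1024, 1024],
        "seed_range": [0, 10],
        "gpu_number": 1,  # optional; omit to always run on CPU
    }
    os.makedirs('configurations', exist_ok=True)
    with open(os.path.join('configurations', 'noneiv_california.json'),
            'w') as f:
        json.dump(conf, f, indent=4)
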
@@ -58,8 +78,26 @@ class UpdatedTrainEpoch(train_and_store.TrainEpoch):
         Overwrites the corresponding method
         """
         if epoch >= epoch_offset:
-            net.std_y_par.requires_grad = True
-        self.lr_scheduler.step() 
+            pred_collection = []
+            y_collection = []
+            for i, (x, y) in enumerate(self.train_dataloader):
+                if i >= noneiv_prediction_number_of_batches:
+                    break
+                if len(y.shape) <= 1:
+                    y = y.view((-1, 1))
+                x, y = x.to(device), y.to(device)
+                pred, _ = net.predict(x,
+                        number_of_draws=noneiv_prediction_number_of_draws,
+                        remove_graph=True,
+                        take_average_of_prediction=True)
+                pred_collection.append(pred)
+                y_collection.append(y)
+            pred_collection = torch.cat(pred_collection, dim=0)
+            y_collection = torch.cat(y_collection, dim=0)
+            assert pred_collection.shape == y_collection.shape
+            rmse = torch.sqrt(torch.mean((pred_collection - y_collection)**2))
+            net.change_std_y(rmse)
+        self.lr_scheduler.step()
 
     def extra_report(self, net, i):
         """
@@ -126,7 +164,7 @@ def train_on_data(init_std_y, seed):
             lr=lr, reg=reg, report_point=report_point, device=device)
     # run and save
     save_file = os.path.join('saved_networks',
-            f'noneiv_california'\
+            f'noneiv_{short_dataname}'\
                     f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
                     f'_p_{p:.2f}_seed_{seed}.pkl')
     train_and_store.train_and_store(net=net, 
@@ -139,7 +177,7 @@ if __name__ == '__main__':
     for seed in seed_list:
         # Tensorboard monitoring
         writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
-                f'run_noneiv_california_lr_{lr:.4f}_seed'\
+                f'run_noneiv_{short_dataname}_lr_{lr:.4f}_seed'\
                 f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}')
         print(f'>>>>SEED: {seed}')
         for init_std_y in init_std_y_list:
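
For clarity, the sigma_y update performed in `post_epoch_update` above,
restated as a standalone helper. This is a sketch: the draw and batch counts
default to placeholder values here, whereas in the script they come from the
configuration file.

    import torch

    def recalibrate_std_y(net, train_dataloader, device,
            number_of_draws=100, number_of_batches=10):
        # Collect dropout-averaged predictions on a few training batches
        # and set sigma_y to their RMSE instead of training std_y_par by
        # gradient descent. Default counts are placeholders.
        pred_collection, y_collection = [], []
        for i, (x, y) in enumerate(train_dataloader):
            if i >= number_of_batches:
                break
            if len(y.shape) <= 1:
                y = y.view((-1, 1))
            x, y = x.to(device), y.to(device)
            pred, _ = net.predict(x, number_of_draws=number_of_draws,
                    remove_graph=True, take_average_of_prediction=True)
            pred_collection.append(pred)
            y_collection.append(y)
        rmse = torch.sqrt(torch.mean(
            (torch.cat(pred_collection) - torch.cat(y_collection))**2))
        net.change_std_y(rmse)
        return rmse

Since `take_average_of_prediction=True` is used, as above, the RMSE measures
the predictor averaged over the dropout draws rather than a single draw.
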
diff --git a/Experiments/train_noneiv_concrete.py b/Experiments/train_noneiv_concrete.py
deleted file mode 100644
index 5e7c1e33ea750b5ddc661471969682ecab2008cd..0000000000000000000000000000000000000000
--- a/Experiments/train_noneiv_concrete.py
+++ /dev/null
@@ -1,149 +0,0 @@
-"""
-Train non-EiV model on concrete strength dataset using different seeds
-"""
-import random
-import os
-
-import numpy as np
-import torch
-import torch.backends.cudnn
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard.writer import SummaryWriter
-
-from EIVArchitectures import Networks, initialize_weights
-from EIVData.concrete_strength import load_data
-from EIVTrainingRoutines import train_and_store, loss_functions
-
-# hyperparameters
-lr = 1e-3
-batch_size = 32
-test_batch_size = 800
-number_of_epochs = 100
-unscaled_reg = 10
-report_point = 5
-p = 0.2
-lr_update = 20
-# pretraining = 300
-epoch_offset = 10
-init_std_y_list = [0.5]
-gamma = 0.5
-hidden_layers = [1024, 1024, 1024, 1024]
-device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
-
-# reproducibility
-def set_seeds(seed):
-    torch.backends.cudnn.benchmark = False
-    np.random.seed(seed)
-    random.seed(seed) 
-    torch.manual_seed(seed)
-seed_list = range(10)
-
-# to store the RMSE
-rmse_chain = []
-
-class UpdatedTrainEpoch(train_and_store.TrainEpoch):
-    def pre_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch == 0:
-            self.lr = self.initial_lr
-            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
-            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
-            self.optimizer, lr_update, gamma)
-
-
-    def post_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch >= epoch_offset:
-            net.std_y_par.requires_grad = True
-        self.lr_scheduler.step() 
-
-    def extra_report(self, net, i):
-        """
-        Overwrites the corresponding method. Uses the module-level
-        `writer`, which is assigned after initialization of this class.
-        """
-        rmse = self.rmse(net).item()
-        rmse_chain.append(rmse)
-        writer.add_scalar('RMSE', rmse, self.total_count)
-        writer.add_scalar('std_y', self.last_std_y, self.total_count)
-        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
-        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
-        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
-        print(f'RMSE {rmse:.3f}')
-
-    def rmse(self, net):
-        """
-        Compute the root mean squared error for `net`
-        """
-        net_train_state = net.training
-        net.eval()
-        x, y = next(iter(self.test_dataloader))
-        if len(y.shape) <= 1:
-            y = y.view((-1,1))
-        out = net(x.to(device))[0].detach().cpu()
-        assert out.shape == y.shape
-        if net_train_state:
-            net.train()
-        return torch.sqrt(torch.mean((out-y)**2))
-
-def train_on_data(init_std_y, seed):
-    """
-    Sets `seed`, loads data and trains a Bernoulli model, starting with
-    `init_std_y`.
-    """
-    # set seed
-    set_seeds(seed)
-    # load Datasets
-    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
-            normalize=True)
-    # make dataloaders
-    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
-            shuffle=True)
-    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
-            shuffle=True)
-    # create a net
-    input_dim = train_data[0][0].numel()
-    output_dim = train_data[0][1].numel()
-    net = Networks.FNNBer(p=p,
-            init_std_y=init_std_y,
-            h=[input_dim, *hidden_layers, output_dim])
-    net.apply(initialize_weights.glorot_init)
-    net = net.to(device)
-    net.std_y_par.requires_grad = False
-    std_x_map = lambda: 0.0
-    std_y_map = lambda: net.get_std_y().detach().cpu().item()
-    # regularization
-    reg = unscaled_reg/len(train_data)
-    # create epoch_map
-    criterion = loss_functions.nll_reg_loss
-    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
-            test_dataloader=test_dataloader,
-            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
-            lr=lr, reg=reg, report_point=report_point, device=device)
-    # run and save
-    save_file = os.path.join('saved_networks',
-            f'noneiv_concrete'\
-                    f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
-                    f'_p_{p:.2f}_seed_{seed}.pkl')
-    train_and_store.train_and_store(net=net, 
-            epoch_map=epoch_map,
-            number_of_epochs=number_of_epochs,
-            save_file=save_file)
-    
-
-if __name__ == '__main__':
-    for seed in seed_list:
-        # Tensorboard monitoring
-        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
-                f'run_noneiv_concrete_lr_{lr:.4f}_seed'\
-                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}')
-        print(f'>>>>SEED: {seed}')
-        for init_std_y in init_std_y_list:
-            print(f'Using init_std_y={init_std_y:.3f}')
-            train_on_data(init_std_y, seed)
-
-
diff --git a/Experiments/train_noneiv_energy.py b/Experiments/train_noneiv_energy.py
deleted file mode 100644
index 340b81ad389e443a2cc67a6e02388f463b735b0f..0000000000000000000000000000000000000000
--- a/Experiments/train_noneiv_energy.py
+++ /dev/null
@@ -1,149 +0,0 @@
-"""
-Train non-EiV model on energy efficiency dataset using different seeds
-"""
-import random
-import os
-
-import numpy as np
-import torch
-import torch.backends.cudnn
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard.writer import SummaryWriter
-
-from EIVArchitectures import Networks, initialize_weights
-from EIVData.energy_efficiency import load_data
-from EIVTrainingRoutines import train_and_store, loss_functions
-
-# hyperparameters
-lr = 1e-3
-batch_size = 32
-test_batch_size = 600
-number_of_epochs = 600
-unscaled_reg = 10
-report_point = 5
-p = 0.2
-lr_update = 100
-# pretraining = 300
-epoch_offset = 100
-init_std_y_list = [0.5]
-gamma = 0.5
-hidden_layers = [1024, 1024, 1024, 1024]
-device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
-
-# reproducibility
-def set_seeds(seed):
-    torch.backends.cudnn.benchmark = False
-    np.random.seed(seed)
-    random.seed(seed) 
-    torch.manual_seed(seed)
-seed_list = range(10)
-
-# to store the RMSE
-rmse_chain = []
-
-class UpdatedTrainEpoch(train_and_store.TrainEpoch):
-    def pre_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch == 0:
-            self.lr = self.initial_lr
-            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
-            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
-            self.optimizer, lr_update, gamma)
-
-
-    def post_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch >= epoch_offset:
-            net.std_y_par.requires_grad = True
-        self.lr_scheduler.step() 
-
-    def extra_report(self, net, i):
-        """
-        Overwrites the corresponding method. Uses the module-level
-        `writer`, which is assigned after initialization of this class.
-        """
-        rmse = self.rmse(net).item()
-        rmse_chain.append(rmse)
-        writer.add_scalar('RMSE', rmse, self.total_count)
-        writer.add_scalar('std_y', self.last_std_y, self.total_count)
-        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
-        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
-        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
-        print(f'RMSE {rmse:.3f}')
-
-    def rmse(self, net):
-        """
-        Compute the root mean squared error for `net`
-        """
-        net_train_state = net.training
-        net.eval()
-        x, y = next(iter(self.test_dataloader))
-        if len(y.shape) <= 1:
-            y = y.view((-1,1))
-        out = net(x.to(device))[0].detach().cpu()
-        assert out.shape == y.shape
-        if net_train_state:
-            net.train()
-        return torch.sqrt(torch.mean((out-y)**2))
-
-def train_on_data(init_std_y, seed):
-    """
-    Sets `seed`, loads data and trains a Bernoulli model, starting with
-    `init_std_y`.
-    """
-    # set seed
-    set_seeds(seed)
-    # load Datasets
-    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
-            normalize=True)
-    # make dataloaders
-    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
-            shuffle=True)
-    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
-            shuffle=True)
-    # create a net
-    input_dim = train_data[0][0].numel()
-    output_dim = train_data[0][1].numel()
-    net = Networks.FNNBer(p=p,
-            init_std_y=init_std_y,
-            h=[input_dim, *hidden_layers, output_dim])
-    net.apply(initialize_weights.glorot_init)
-    net = net.to(device)
-    net.std_y_par.requires_grad = False
-    std_x_map = lambda: 0.0
-    std_y_map = lambda: net.get_std_y().detach().cpu().item()
-    # regularization
-    reg = unscaled_reg/len(train_data)
-    # create epoch_map
-    criterion = loss_functions.nll_reg_loss
-    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
-            test_dataloader=test_dataloader,
-            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
-            lr=lr, reg=reg, report_point=report_point, device=device)
-    # run and save
-    save_file = os.path.join('saved_networks',
-            f'noneiv_energy'\
-                    f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
-                    f'_p_{p:.2f}_seed_{seed}.pkl')
-    train_and_store.train_and_store(net=net, 
-            epoch_map=epoch_map,
-            number_of_epochs=number_of_epochs,
-            save_file=save_file)
-    
-
-if __name__ == '__main__':
-    for seed in seed_list:
-        # Tensorboard monitoring
-        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
-                f'run_noneiv_energy_lr_{lr:.4f}_seed'\
-                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}')
-        print(f'>>>>SEED: {seed}')
-        for init_std_y in init_std_y_list:
-            print(f'Using init_std_y={init_std_y:.3f}')
-            train_on_data(init_std_y, seed)
-
-
diff --git a/Experiments/train_noneiv_kin8nm.py b/Experiments/train_noneiv_kin8nm.py
deleted file mode 100644
index 7070c381f25135038a4e6b7ca5058d7efbccfb7f..0000000000000000000000000000000000000000
--- a/Experiments/train_noneiv_kin8nm.py
+++ /dev/null
@@ -1,149 +0,0 @@
-"""
-Train non-EiV model on the kin8nm dataset using different seeds
-"""
-import random
-import os
-
-import numpy as np
-import torch
-import torch.backends.cudnn
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard.writer import SummaryWriter
-
-from EIVArchitectures import Networks, initialize_weights
-from EIVData.kin8nm import load_data
-from EIVTrainingRoutines import train_and_store, loss_functions
-
-# hyperparameters
-lr = 1e-3
-batch_size = 32
-test_batch_size = 600
-number_of_epochs = 30
-unscaled_reg = 10
-report_point = 5
-p = 0.2
-lr_update = 20
-# pretraining = 300
-epoch_offset = 19
-init_std_y_list = [0.5]
-gamma = 0.5
-hidden_layers = [1024, 1024, 1024, 1024]
-device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
-
-# reproducibility
-def set_seeds(seed):
-    torch.backends.cudnn.benchmark = False
-    np.random.seed(seed)
-    random.seed(seed) 
-    torch.manual_seed(seed)
-seed_list = range(10)
-
-# to store the RMSE
-rmse_chain = []
-
-class UpdatedTrainEpoch(train_and_store.TrainEpoch):
-    def pre_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch == 0:
-            self.lr = self.initial_lr
-            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
-            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
-            self.optimizer, lr_update, gamma)
-
-
-    def post_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch >= epoch_offset:
-            net.std_y_par.requires_grad = True
-        self.lr_scheduler.step() 
-
-    def extra_report(self, net, i):
-        """
-        Overwrites the corresponding method. Uses the module-level
-        `writer`, which is assigned after initialization of this class.
-        """
-        rmse = self.rmse(net).item()
-        rmse_chain.append(rmse)
-        writer.add_scalar('RMSE', rmse, self.total_count)
-        writer.add_scalar('std_y', self.last_std_y, self.total_count)
-        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
-        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
-        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
-        print(f'RMSE {rmse:.3f}')
-
-    def rmse(self, net):
-        """
-        Compute the root mean squared error for `net`
-        """
-        net_train_state = net.training
-        net.eval()
-        x, y = next(iter(self.test_dataloader))
-        if len(y.shape) <= 1:
-            y = y.view((-1,1))
-        out = net(x.to(device))[0].detach().cpu()
-        assert out.shape == y.shape
-        if net_train_state:
-            net.train()
-        return torch.sqrt(torch.mean((out-y)**2))
-
-def train_on_data(init_std_y, seed):
-    """
-    Sets `seed`, loads data and trains a Bernoulli model, starting with
-    `init_std_y`.
-    """
-    # set seed
-    set_seeds(seed)
-    # load Datasets
-    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
-            normalize=True)
-    # make dataloaders
-    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
-            shuffle=True)
-    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
-            shuffle=True)
-    # create a net
-    input_dim = train_data[0][0].numel()
-    output_dim = train_data[0][1].numel()
-    net = Networks.FNNBer(p=p,
-            init_std_y=init_std_y,
-            h=[input_dim, *hidden_layers, output_dim])
-    net.apply(initialize_weights.glorot_init)
-    net = net.to(device)
-    net.std_y_par.requires_grad = False
-    std_x_map = lambda: 0.0
-    std_y_map = lambda: net.get_std_y().detach().cpu().item()
-    # regularization
-    reg = unscaled_reg/len(train_data)
-    # create epoch_map
-    criterion = loss_functions.nll_reg_loss
-    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
-            test_dataloader=test_dataloader,
-            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
-            lr=lr, reg=reg, report_point=report_point, device=device)
-    # run and save
-    save_file = os.path.join('saved_networks',
-            f'noneiv_kin8nm'\
-                    f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
-                    f'_p_{p:.2f}_seed_{seed}.pkl')
-    train_and_store.train_and_store(net=net, 
-            epoch_map=epoch_map,
-            number_of_epochs=number_of_epochs,
-            save_file=save_file)
-    
-
-if __name__ == '__main__':
-    for seed in seed_list:
-        # Tensorboard monitoring
-        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
-                f'run_noneiv_kin8nm_lr_{lr:.4f}_seed'\
-                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}')
-        print(f'>>>>SEED: {seed}')
-        for init_std_y in init_std_y_list:
-            print(f'Using init_std_y={init_std_y:.3f}')
-            train_on_data(init_std_y, seed)
-
-
diff --git a/Experiments/train_noneiv_msd.py b/Experiments/train_noneiv_msd.py
deleted file mode 100644
index b72644d9e964d15e8e6fbf97a6b4f7e43226bce1..0000000000000000000000000000000000000000
--- a/Experiments/train_noneiv_msd.py
+++ /dev/null
@@ -1,147 +0,0 @@
-"""
-Train non-EiV model on the million song dataset using different seeds
-"""
-import random
-import os
-
-import numpy as np
-import torch
-import torch.backends.cudnn
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard.writer import SummaryWriter
-
-from EIVArchitectures import Networks, initialize_weights
-from EIVData.million_song import load_data
-from EIVTrainingRoutines import train_and_store, loss_functions
-
-# hyperparameters
-lr = 1e-3
-batch_size = 100
-test_batch_size = 600
-number_of_epochs = 10
-unscaled_reg = 10
-report_point = 5
-p = 0.2
-lr_update = 4
-# pretraining = 300
-epoch_offset = 4
-init_std_y_list = [0.5]
-gamma = 0.5
-hidden_layers = [1024, 1024, 1024, 1024]
-device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
-
-# reproducibility
-def set_seeds(seed):
-    torch.backends.cudnn.benchmark = False
-    np.random.seed(seed)
-    random.seed(seed) 
-    torch.manual_seed(seed)
-seed_list = range(10)
-
-# to store the RMSE
-rmse_chain = []
-
-class UpdatedTrainEpoch(train_and_store.TrainEpoch):
-    def pre_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch == 0:
-            self.lr = self.initial_lr
-            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
-            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
-            self.optimizer, lr_update, gamma)
-
-
-    def post_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch >= epoch_offset:
-            net.std_y_par.requires_grad = True
-        self.lr_scheduler.step() 
-
-    def extra_report(self, net, i):
-        """
-        Overwrites the corresponding method. Uses the module-level
-        `writer`, which is assigned after initialization of this class.
-        """
-        rmse = self.rmse(net).item()
-        rmse_chain.append(rmse)
-        writer.add_scalar('RMSE', rmse, self.total_count)
-        writer.add_scalar('std_y', self.last_std_y, self.total_count)
-        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
-        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
-        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
-        print(f'RMSE {rmse:.3f}')
-
-    def rmse(self, net):
-        """
-        Compute the root mean squared error for `net`
-        """
-        net_train_state = net.training
-        net.eval()
-        x, y = next(iter(self.test_dataloader))
-        if len(y.shape) <= 1:
-            y = y.view((-1,1))
-        out = net(x.to(device))[0].detach().cpu()
-        assert out.shape == y.shape
-        if net_train_state:
-            net.train()
-        return torch.sqrt(torch.mean((out-y)**2))
-
-def train_on_data(init_std_y, seed):
-    """
-    Sets `seed`, loads data and trains a Bernoulli model, starting with
-    `init_std_y`.
-    """
-    # set seed
-    set_seeds(seed)
-    # load Datasets
-    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
-            normalize=True)
-    # make dataloaders
-    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
-            shuffle=True)
-    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
-            shuffle=True)
-    # create a net
-    input_dim = train_data[0][0].numel()
-    output_dim = train_data[0][1].numel()
-    net = Networks.FNNBer(p=p,
-            init_std_y=init_std_y,
-            h=[input_dim, *hidden_layers, output_dim])
-    net.apply(initialize_weights.glorot_init)
-    net = net.to(device)
-    net.std_y_par.requires_grad = False
-    std_x_map = lambda: 0.0
-    std_y_map = lambda: net.get_std_y().detach().cpu().item()
-    # regularization
-    reg = unscaled_reg/len(train_data)
-    # create epoch_map
-    criterion = loss_functions.nll_reg_loss
-    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
-            test_dataloader=test_dataloader,
-            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
-            lr=lr, reg=reg, report_point=report_point, device=device)
-    # run and save
-    save_file = os.path.join('saved_networks',
-            f'noneiv_msd'\
-                    f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
-                    f'_p_{p:.2f}_seed_{seed}.pkl')
-    train_and_store.train_and_store(net=net, 
-            epoch_map=epoch_map,
-            number_of_epochs=number_of_epochs,
-            save_file=save_file)
-    
-
-if __name__ == '__main__':
-    for seed in seed_list:
-        # Tensorboard monitoring
-        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
-                f'run_noneiv_msd_lr_{lr:.4f}_seed'\
-                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}')
-        print(f'>>>>SEED: {seed}')
-        for init_std_y in init_std_y_list:
-            print(f'Using init_std_y={init_std_y:.3f}')
-            train_on_data(init_std_y, seed)
diff --git a/Experiments/train_noneiv_naval.py b/Experiments/train_noneiv_naval.py
deleted file mode 100644
index fb8c57f6774981d5099f8c18f2b6e0dbba58af5e..0000000000000000000000000000000000000000
--- a/Experiments/train_noneiv_naval.py
+++ /dev/null
@@ -1,147 +0,0 @@
-"""
-Train non-EiV model on the naval propulsion dataset using different seeds
-"""
-import random
-import os
-
-import numpy as np
-import torch
-import torch.backends.cudnn
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard.writer import SummaryWriter
-
-from EIVArchitectures import Networks, initialize_weights
-from EIVData.naval_propulsion import load_data
-from EIVTrainingRoutines import train_and_store, loss_functions
-
-# hyperparameters
-lr = 1e-3
-batch_size = 32
-test_batch_size = 600
-number_of_epochs = 30
-unscaled_reg = 10
-report_point = 5
-p = 0.2
-lr_update = 20
-# pretraining = 300
-epoch_offset = 20
-init_std_y_list = [0.5]
-gamma = 0.5
-hidden_layers = [1024, 1024, 1024, 1024]
-device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
-
-# reproducibility
-def set_seeds(seed):
-    torch.backends.cudnn.benchmark = False
-    np.random.seed(seed)
-    random.seed(seed) 
-    torch.manual_seed(seed)
-seed_list = range(10)
-
-# to store the RMSE
-rmse_chain = []
-
-class UpdatedTrainEpoch(train_and_store.TrainEpoch):
-    def pre_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch == 0:
-            self.lr = self.initial_lr
-            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
-            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
-            self.optimizer, lr_update, gamma)
-
-
-    def post_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch >= epoch_offset:
-            net.std_y_par.requires_grad = True
-        self.lr_scheduler.step() 
-
-    def extra_report(self, net, i):
-        """
-        Overwrites the corresponding method. Uses the module-level
-        `writer`, which is assigned after initialization of this class.
-        """
-        rmse = self.rmse(net).item()
-        rmse_chain.append(rmse)
-        writer.add_scalar('RMSE', rmse, self.total_count)
-        writer.add_scalar('std_y', self.last_std_y, self.total_count)
-        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
-        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
-        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
-        print(f'RMSE {rmse:.3f}')
-
-    def rmse(self, net):
-        """
-        Compute the root mean squared error for `net`
-        """
-        net_train_state = net.training
-        net.eval()
-        x, y = next(iter(self.test_dataloader))
-        if len(y.shape) <= 1:
-            y = y.view((-1,1))
-        out = net(x.to(device))[0].detach().cpu()
-        assert out.shape == y.shape
-        if net_train_state:
-            net.train()
-        return torch.sqrt(torch.mean((out-y)**2))
-
-def train_on_data(init_std_y, seed):
-    """
-    Sets `seed`, loads data and trains a Bernoulli model, starting with
-    `init_std_y`.
-    """
-    # set seed
-    set_seeds(seed)
-    # load Datasets
-    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
-            normalize=True)
-    # make dataloaders
-    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
-            shuffle=True)
-    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
-            shuffle=True)
-    # create a net
-    input_dim = train_data[0][0].numel()
-    output_dim = train_data[0][1].numel()
-    net = Networks.FNNBer(p=p,
-            init_std_y=init_std_y,
-            h=[input_dim, *hidden_layers, output_dim])
-    net.apply(initialize_weights.glorot_init)
-    net = net.to(device)
-    net.std_y_par.requires_grad = False
-    std_x_map = lambda: 0.0
-    std_y_map = lambda: net.get_std_y().detach().cpu().item()
-    # regularization
-    reg = unscaled_reg/len(train_data)
-    # create epoch_map
-    criterion = loss_functions.nll_reg_loss
-    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
-            test_dataloader=test_dataloader,
-            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
-            lr=lr, reg=reg, report_point=report_point, device=device)
-    # run and save
-    save_file = os.path.join('saved_networks',
-            f'noneiv_naval'\
-                    f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
-                    f'_p_{p:.2f}_seed_{seed}.pkl')
-    train_and_store.train_and_store(net=net, 
-            epoch_map=epoch_map,
-            number_of_epochs=number_of_epochs,
-            save_file=save_file)
-    
-
-if __name__ == '__main__':
-    for seed in seed_list:
-        # Tensorboard monitoring
-        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
-                f'run_noneiv_naval_lr_{lr:.4f}_seed'\
-                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}')
-        print(f'>>>>SEED: {seed}')
-        for init_std_y in init_std_y_list:
-            print(f'Using init_std_y={init_std_y:.3f}')
-            train_on_data(init_std_y, seed)
diff --git a/Experiments/train_noneiv_power.py b/Experiments/train_noneiv_power.py
deleted file mode 100644
index 3c87a2d74e27f09692d393366b0bacd8396c2881..0000000000000000000000000000000000000000
--- a/Experiments/train_noneiv_power.py
+++ /dev/null
@@ -1,146 +0,0 @@
-"""
-Train non-EiV model on power plant dataset using different seeds
-"""
-import random
-import os
-
-import numpy as np
-import torch
-import torch.backends.cudnn
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard.writer import SummaryWriter
-
-from EIVArchitectures import Networks, initialize_weights
-from EIVData.power_plant import load_data
-from EIVTrainingRoutines import train_and_store, loss_functions
-
-# hyperparameters
-lr = 1e-3
-batch_size = 64
-test_batch_size = 600
-number_of_epochs = 35
-unscaled_reg = 10
-report_point = 5
-p = 0.2
-lr_update = 10
-# pretraining = 300
-epoch_offset = 15
-init_std_y_list = [0.5]
-gamma = 0.5
-hidden_layers = [1024, 1024, 1024, 1024]
-device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
-
-# reproducibility
-def set_seeds(seed):
-    torch.backends.cudnn.benchmark = False
-    np.random.seed(seed)
-    random.seed(seed) 
-    torch.manual_seed(seed)
-seed_list = range(10)
-
-# to store the RMSE
-rmse_chain = []
-
-class UpdatedTrainEpoch(train_and_store.TrainEpoch):
-    def pre_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch == 0:
-            self.lr = self.initial_lr
-            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
-            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
-            self.optimizer, lr_update, gamma)
-
-
-    def post_epoch_update(self, net, epoch):
-        """
-        Overwrites the corresponding method
-        """
-        if epoch >= epoch_offset:
-            net.std_y_par.requires_grad = True
-        self.lr_scheduler.step() 
-
-    def extra_report(self, net, i):
-        """
-        Overwrites the corresponding method. Uses the module-level
-        `writer`, which is assigned after initialization of this class.
-        """
-        rmse = self.rmse(net).item()
-        rmse_chain.append(rmse)
-        writer.add_scalar('RMSE', rmse, self.total_count)
-        writer.add_scalar('std_y', self.last_std_y, self.total_count)
-        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
-        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
-        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
-        print(f'RMSE {rmse:.3f}')
-
-    def rmse(self, net):
-        """
-        Compute the root mean squared error for `net`
-        """
-        net_train_state = net.training
-        net.eval()
-        x, y = next(iter(self.test_dataloader))
-        if len(y.shape) <= 1:
-            y = y.view((-1,1))
-        out = net(x.to(device))[0].detach().cpu()
-        assert out.shape == y.shape
-        if net_train_state:
-            net.train()
-        return torch.sqrt(torch.mean((out-y)**2))
-
-def train_on_data(init_std_y, seed):
-    """
-    Sets `seed`, loads data and trains a Bernoulli model, starting with
-    `init_std_y`.
-    """
-    # set seed
-    set_seeds(seed)
-    # load Datasets
-    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
-            normalize=True)
-    # make dataloaders
-    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
-            shuffle=True)
-    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
-            shuffle=True)
-    # create a net
-    input_dim = train_data[0][0].numel()
-    output_dim = train_data[0][1].numel()
-    net = Networks.FNNBer(p=p,
-            init_std_y=init_std_y,
-            h=[input_dim, *hidden_layers, output_dim])
-    net.apply(initialize_weights.glorot_init)
-    net = net.to(device)
-    net.std_y_par.requires_grad = False
-    std_x_map = lambda: 0.0
-    std_y_map = lambda: net.get_std_y().detach().cpu().item()
-    # regularization
-    reg = unscaled_reg/len(train_data)
-    # create epoch_map
-    criterion = loss_functions.nll_reg_loss
-    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
-            test_dataloader=test_dataloader,
-            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
-            lr=lr, reg=reg, report_point=report_point, device=device)
-    # run and save
-    save_file = os.path.join('saved_networks',
-            f'noneiv_power'\
-                    f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
-                    f'_p_{p:.2f}_seed_{seed}.pkl')
-    train_and_store.train_and_store(net=net, 
-            epoch_map=epoch_map,
-            number_of_epochs=number_of_epochs,
-            save_file=save_file)
-
-if __name__ == '__main__':
-    for seed in seed_list:
-        # Tensorboard monitoring
-        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
-                f'run_noneiv_power_lr_{lr:.4f}_seed'\
-                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}')
-        print(f'>>>>SEED: {seed}')
-        for init_std_y in init_std_y_list:
-            print(f'Using init_std_y={init_std_y:.3f}')
-            train_on_data(init_std_y, seed)
diff --git a/Experiments/train_noneiv_protein.py b/Experiments/train_noneiv_protein.py
deleted file mode 100644
index 9e2d3f4095520182cc3b8f859c746c691f14722b..0000000000000000000000000000000000000000
--- a/Experiments/train_noneiv_protein.py
+++ /dev/null
@@ -1,149 +0,0 @@
-"""
-Train non-EiV model on protein structure dataset using different seeds
-"""
-import random
-import os
-
-import numpy as np
-import torch
-import torch.backends.cudnn
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard.writer import SummaryWriter
-
-from EIVArchitectures import Networks, initialize_weights
-from EIVData.protein_structure import load_data
-from EIVTrainingRoutines import train_and_store, loss_functions
-
-# hyperparameters
-lr = 1e-3
-batch_size = 100
-test_batch_size = 600
-number_of_epochs = 30
-unscaled_reg = 10
-report_point = 5
-p = 0.2
-lr_update = 10
-# pretraining = 300
-epoch_offset = 10
-init_std_y_list = [0.5]
-gamma = 0.5
-hidden_layers = [1024, 1024, 1024, 1024]
-device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
-
-# reproducibility
-def set_seeds(seed):
-    torch.backends.cudnn.benchmark = False
-    np.random.seed(seed)
-    random.seed(seed) 
-    torch.manual_seed(seed)
-seed_list = range(10)
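-# set_seeds seeds all three RNG sources (NumPy, Python's random module and
-# PyTorch); disabling cudnn.benchmark avoids the timing-based algorithm
-# selection that can make runs non-deterministic.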
-
-# to store the RMSE
-rmse_chain = []
-
-class UpdatedTrainEpoch(train_and_store.TrainEpoch):
-    def pre_epoch_update(self, net, epoch):
-        """
-        Overrides the corresponding method. Sets up the Adam optimizer
-        and the StepLR scheduler at the start of training.
-        """
-        if epoch == 0:
-            self.lr = self.initial_lr
-            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
-            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
-                    self.optimizer, lr_update, gamma)
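-            # StepLR halves the learning rate (gamma = 0.5) every
-            # `lr_update` = 10 epochs, i.e. 1e-3 -> 5e-4 at epoch 10
-            # -> 2.5e-4 at epoch 20 over the 30 epochs trained here.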
-
-
-    def post_epoch_update(self, net, epoch):
-        """
-        Overrides the corresponding method. Unfreezes std_y once
-        `epoch_offset` is reached and steps the learning rate scheduler.
-        """
-        if epoch >= epoch_offset:
-            net.std_y_par.requires_grad = True
-        self.lr_scheduler.step() 
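-        # sigma_y stays frozen (requires_grad = False, set in
-        # train_on_data) for the first `epoch_offset` = 10 epochs and is
-        # then unfrozen, so it is fitted jointly via the negative
-        # log-likelihood; Adam already holds the parameter but skips it
-        # while its .grad is None.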
-
-    def extra_report(self, net, i):
-        """
-        Overrides the corresponding method. Logs the RMSE together with
-        std_y and the train/test losses to the TensorBoard writer.
-        """
-        rmse = self.rmse(net).item()
-        rmse_chain.append(rmse)
-        writer.add_scalar('RMSE', rmse, self.total_count)
-        writer.add_scalar('std_y', self.last_std_y, self.total_count)
-        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
-        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
-        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
-        print(f'RMSE {rmse:.3f}')
-
-    def rmse(self, net):
-        """
-        Compute the root mean squared error for `net` on a single test batch
-        """
-        net_train_state = net.training
-        net.eval()
-        x, y = next(iter(self.test_dataloader))
-        if len(y.shape) <= 1:
-            y = y.view((-1,1))
-        out = net(x.to(device))[0].detach().cpu()
-        assert out.shape == y.shape
-        if net_train_state:
-            net.train()
-        return torch.sqrt(torch.mean((out-y)**2))
-
-def train_on_data(init_std_y, seed):
-    """
-    Sets `seed`, loads the data and trains a Bernoulli model, starting with
-    `init_std_y`.
-    """
-    # set seed
-    set_seeds(seed)
-    # load Datasets
-    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
-            normalize=True)
-    # make dataloaders
-    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
-            shuffle=True)
-    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
-            shuffle=True)
-    # create a net
-    input_dim = train_data[0][0].numel()
-    output_dim = train_data[0][1].numel()
-    net = Networks.FNNBer(p=p,
-            init_std_y=init_std_y,
-            h=[input_dim, *hidden_layers, output_dim])
-    net.apply(initialize_weights.glorot_init)
-    net = net.to(device)
-    net.std_y_par.requires_grad = False
-    std_x_map = lambda: 0.0
-    std_y_map = lambda: net.get_std_y().detach().cpu().item()
-    # regularization
-    reg = unscaled_reg/len(train_data)
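-    # dividing by the dataset size keeps the weight penalty on the same
-    # scale as the per-sample negative log-likelihood, so unscaled_reg
-    # presumably stays comparable across datasets of different size.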
-    # create epoch_map
-    criterion = loss_functions.nll_reg_loss
-    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
-            test_dataloader=test_dataloader,
-            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
-            lr=lr, reg=reg, report_point=report_point, device=device)
-    # run and save
-    save_file = os.path.join('saved_networks',
-            f'noneiv_protein'\
-                    f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
-                    f'_p_{p:.2f}_seed_{seed}.pkl')
-    train_and_store.train_and_store(net=net, 
-            epoch_map=epoch_map,
-            number_of_epochs=number_of_epochs,
-            save_file=save_file)
-    
-
-if __name__ == '__main__':
-    for seed in seed_list:
-        # Tensorboard monitoring
-        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
-                f'run_noneiv_protein_lr_{lr:.4f}_seed'\
-                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}')
-        print(f'>>>>SEED: {seed}')
-        for init_std_y in init_std_y_list:
-            print(f'Using init_std_y={init_std_y:.3f}')
-            train_on_data(init_std_y, seed)
-
-
diff --git a/Experiments/train_noneiv_wine.py b/Experiments/train_noneiv_wine.py
deleted file mode 100644
index a445c1e87b3e72069614b4d4bb3f5f31a8477566..0000000000000000000000000000000000000000
--- a/Experiments/train_noneiv_wine.py
+++ /dev/null
@@ -1,149 +0,0 @@
-"""
-Train non-EiV model on wine quality dataset using different seeds
-"""
-import random
-import os
-
-import numpy as np
-import torch
-import torch.backends.cudnn
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard.writer import SummaryWriter
-
-from EIVArchitectures import Networks, initialize_weights
-from EIVData.wine_quality import load_data
-from EIVTrainingRoutines import train_and_store, loss_functions
-
-# hyperparameters
-lr = 1e-3
-batch_size = 32
-test_batch_size = 800
-number_of_epochs = 100
-unscaled_reg = 10
-report_point = 5
-p = 0.2
-lr_update = 30
-# pretraining = 300
-epoch_offset = 50
-init_std_y_list = [0.5]
-gamma = 0.5
-hidden_layers = [1024, 1024, 1024, 1024]
-device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
-
-# reproducibility
-def set_seeds(seed):
-    torch.backends.cudnn.benchmark = False
-    np.random.seed(seed)
-    random.seed(seed) 
-    torch.manual_seed(seed)
-seed_list = range(10)
-
-# to store the RMSE
-rmse_chain = []
-
-class UpdatedTrainEpoch(train_and_store.TrainEpoch):
-    def pre_epoch_update(self, net, epoch):
-        """
-        Overrides the corresponding method. Sets up the Adam optimizer
-        and the StepLR scheduler at the start of training.
-        """
-        if epoch == 0:
-            self.lr = self.initial_lr
-            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
-            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
-                    self.optimizer, lr_update, gamma)
-
-
-    def post_epoch_update(self, net, epoch):
-        """
-        Overrides the corresponding method. Unfreezes std_y once
-        `epoch_offset` is reached and steps the learning rate scheduler.
-        """
-        if epoch >= epoch_offset:
-            net.std_y_par.requires_grad = True
-        self.lr_scheduler.step() 
-
-    def extra_report(self, net, i):
-        """
-        Overrides the corresponding method. Logs the RMSE together with
-        std_y and the train/test losses to the TensorBoard writer.
-        """
-        rmse = self.rmse(net).item()
-        rmse_chain.append(rmse)
-        writer.add_scalar('RMSE', rmse, self.total_count)
-        writer.add_scalar('std_y', self.last_std_y, self.total_count)
-        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
-        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
-        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
-        print(f'RMSE {rmse:.3f}')
-
-    def rmse(self, net):
-        """
-        Compute the root mean squared error for `net` on a single test batch
-        """
-        net_train_state = net.training
-        net.eval()
-        x, y = next(iter(self.test_dataloader))
-        if len(y.shape) <= 1:
-            y = y.view((-1,1))
-        out = net(x.to(device))[0].detach().cpu()
-        assert out.shape == y.shape
-        if net_train_state:
-            net.train()
-        return torch.sqrt(torch.mean((out-y)**2))
-
-def train_on_data(init_std_y, seed):
-    """
-    Sets `seed`, loads the data and trains a Bernoulli model, starting with
-    `init_std_y`.
-    """
-    # set seed
-    set_seeds(seed)
-    # load Datasets
-    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
-            normalize=True)
-    # make dataloaders
-    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
-            shuffle=True)
-    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
-            shuffle=True)
-    # create a net
-    input_dim = train_data[0][0].numel()
-    output_dim = train_data[0][1].numel()
-    net = Networks.FNNBer(p=p,
-            init_std_y=init_std_y,
-            h=[input_dim, *hidden_layers, output_dim])
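-    # FNNBer takes the full list of layer widths h; input and output
-    # widths are read off the first sample, so for the UCI wine quality
-    # data (11 physicochemical features, one quality score) this
-    # presumably yields h = [11, 1024, 1024, 1024, 1024, 1].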
-    net.apply(initialize_weights.glorot_init)
-    net = net.to(device)
-    net.std_y_par.requires_grad = False
-    std_x_map = lambda: 0.0
-    std_y_map = lambda: net.get_std_y().detach().cpu().item()
-    # regularization
-    reg = unscaled_reg/len(train_data)
-    # create epoch_map
-    criterion = loss_functions.nll_reg_loss
-    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
-            test_dataloader=test_dataloader,
-            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
-            lr=lr, reg=reg, report_point=report_point, device=device)
-    # run and save
-    save_file = os.path.join('saved_networks',
-            f'noneiv_wine'\
-                    f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
-                    f'_p_{p:.2f}_seed_{seed}.pkl')
-    train_and_store.train_and_store(net=net, 
-            epoch_map=epoch_map,
-            number_of_epochs=number_of_epochs,
-            save_file=save_file)
-    
-
-if __name__ == '__main__':
-    for seed in seed_list:
-        # Tensorboard monitoring
-        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
-                f'run_noneiv_wine_lr_{lr:.4f}_seed'\
-                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}')
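-        # log_dir is hard-coded to a user-specific path and will need
-        # adapting; `writer` is a module-level name that extra_report
-        # reads when logging its scalars.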
-        print(f'>>>>SEED: {seed}')
-        for init_std_y in init_std_y_list:
-            print(f'Using init_std_y={init_std_y:.3f}')
-            train_on_data(init_std_y, seed)
-
-
diff --git a/Experiments/train_noneiv_yacht.py b/Experiments/train_noneiv_yacht.py
deleted file mode 100644
index 489a81c146a1e63a679c1235c590add5be8785ca..0000000000000000000000000000000000000000
--- a/Experiments/train_noneiv_yacht.py
+++ /dev/null
@@ -1,147 +0,0 @@
-"""
-Train non-EiV model on the yacht hydrodynamics dataset using different seeds
-"""
-import random
-import os
-
-import numpy as np
-import torch
-import torch.backends.cudnn
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard.writer import SummaryWriter
-
-from EIVArchitectures import Networks, initialize_weights
-from EIVData.yacht_hydrodynamics import load_data
-from EIVTrainingRoutines import train_and_store, loss_functions
-
-# hyperparameters
-lr = 1e-3
-batch_size = 32
-test_batch_size = 600
-number_of_epochs = 1200
-unscaled_reg = 10
-report_point = 5
-p = 0.2
-lr_update = 200
-# pretraining = 300
-epoch_offset = 250
-init_std_y_list = [0.5]
-gamma = 0.5
-hidden_layers = [1024, 1024, 1024, 1024]
-device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
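-# 'cuda:1' pins the run to a second GPU; on a single-GPU machine this
-# needs to be changed to 'cuda:0', since the CPU fallback only triggers
-# when no CUDA device is available at all.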
-
-# reproducibility
-def set_seeds(seed):
-    torch.backends.cudnn.benchmark = False
-    np.random.seed(seed)
-    random.seed(seed) 
-    torch.manual_seed(seed)
-seed_list = range(10)
-
-# to store the RMSE
-rmse_chain = []
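-# rmse_chain is module-level and never reset, so it accumulates the
-# reported RMSE values across all seeds and init_std_y values of a run.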
-
-class UpdatedTrainEpoch(train_and_store.TrainEpoch):
-    def pre_epoch_update(self, net, epoch):
-        """
-        Overrides the corresponding method. Sets up the Adam optimizer
-        and the StepLR scheduler at the start of training.
-        """
-        if epoch == 0:
-            self.lr = self.initial_lr
-            self.optimizer = torch.optim.Adam(net.parameters(), lr=self.lr)
-            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
-                    self.optimizer, lr_update, gamma)
-
-
-    def post_epoch_update(self, net, epoch):
-        """
-        Overrides the corresponding method. Unfreezes std_y once
-        `epoch_offset` is reached and steps the learning rate scheduler.
-        """
-        if epoch >= epoch_offset:
-            net.std_y_par.requires_grad = True
-        self.lr_scheduler.step() 
-
-    def extra_report(self, net, i):
-        """
-        Overrides the corresponding method. Logs the RMSE together with
-        std_y and the train/test losses to the TensorBoard writer.
-        """
-        rmse = self.rmse(net).item()
-        rmse_chain.append(rmse)
-        writer.add_scalar('RMSE', rmse, self.total_count)
-        writer.add_scalar('std_y', self.last_std_y, self.total_count)
-        writer.add_scalar('RMSE:std_y', rmse/self.last_std_y, self.total_count)
-        writer.add_scalar('train loss', self.last_train_loss, self.total_count)
-        writer.add_scalar('test loss', self.last_test_loss, self.total_count)
-        print(f'RMSE {rmse:.3f}')
-
-    def rmse(self, net):
-        """
-        Compute the root mean squared error for `net` on a single test batch
-        """
-        net_train_state = net.training
-        net.eval()
-        x, y = next(iter(self.test_dataloader))
-        if len(y.shape) <= 1:
-            y = y.view((-1,1))
-        out = net(x.to(device))[0].detach().cpu()
-        assert out.shape == y.shape
-        if net_train_state:
-            net.train()
-        return torch.sqrt(torch.mean((out-y)**2))
-
-def train_on_data(init_std_y, seed):
-    """
-    Sets `seed`, loads the data and trains a Bernoulli model, starting with
-    `init_std_y`.
-    """
-    # set seed
-    set_seeds(seed)
-    # load Datasets
-    train_data, test_data = load_data(seed=seed, splitting_part=0.8,
-            normalize=True)
-    # make dataloaders
-    train_dataloader = DataLoader(train_data, batch_size=batch_size, 
-            shuffle=True)
-    test_dataloader = DataLoader(test_data, batch_size=test_batch_size,
-            shuffle=True)
-    # create a net
-    input_dim = train_data[0][0].numel()
-    output_dim = train_data[0][1].numel()
-    net = Networks.FNNBer(p=p,
-            init_std_y=init_std_y,
-            h=[input_dim, *hidden_layers, output_dim])
-    net.apply(initialize_weights.glorot_init)
-    net = net.to(device)
-    net.std_y_par.requires_grad = False
-    std_x_map = lambda: 0.0
-    std_y_map = lambda: net.get_std_y().detach().cpu().item()
-    # regularization
-    reg = unscaled_reg/len(train_data)
-    # create epoch_map
-    criterion = loss_functions.nll_reg_loss
-    epoch_map = UpdatedTrainEpoch(train_dataloader=train_dataloader,
-            test_dataloader=test_dataloader,
-            criterion=criterion, std_y_map=std_y_map, std_x_map=std_x_map,
-            lr=lr, reg=reg, report_point=report_point, device=device)
-    # run and save
-    save_file = os.path.join('saved_networks',
-            f'noneiv_yacht'\
-                    f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
-                    f'_p_{p:.2f}_seed_{seed}.pkl')
-    train_and_store.train_and_store(net=net, 
-            epoch_map=epoch_map,
-            number_of_epochs=number_of_epochs,
-            save_file=save_file)
-    
-
-if __name__ == '__main__':
-    for seed in seed_list:
-        # Tensorboard monitoring
-        writer = SummaryWriter(log_dir=f'/home/martin09/tmp/tensorboard/'\
-                f'run_noneiv_yacht_lr_{lr:.4f}_seed'\
-                f'_{seed}_uregu_{unscaled_reg:.1f}_p_{p:.2f}')
-        print(f'>>>>SEED: {seed}')
-        for init_std_y in init_std_y_list:
-            print(f'Using init_std_y={init_std_y:.3f}')
-            train_on_data(init_std_y, seed)