Commit 2298b661 authored by Jörg Martin
First draft on intersection plots

parent 5c71af7f
"""
Plot the predictions and uncertainties of EiV and non-EiV networks along a
diagonal of the normalized input space ("intersection plots"), averaged over
all trained seeds.
"""
import importlib
import os
import argparse
import json
import numpy as np
import torch
import torch.backends.cudnn
from torch.utils.data import DataLoader
from tqdm import tqdm
import matplotlib.pyplot as plt
from EIVArchitectures import Networks
from EIVTrainingRoutines import train_and_store
from EIVGeneral.coverage_metrics import epistemic_coverage, normalized_std,\
        total_coverage
from EIVData.repeated_sampling import repeated_sampling
from EIVGeneral.linear_evaluation import linear_pred_unc, linear_coverage, compute_par_est_var
# read in data via --data option
parser = argparse.ArgumentParser()
parser.add_argument("--data", help="Loads data", default='yacht')
parser.add_argument("--no-autoindent", help="",
        action="store_true") # to avoid conflicts in IPython
args = parser.parse_args()
data = args.data
# load hyperparameters from JSON file
with open(os.path.join('configurations',f'eiv_{data}.json'),'r') as conf_file:
    eiv_conf_dict = json.load(conf_file)
with open(os.path.join('configurations',f'noneiv_{data}.json'),'r') as conf_file:
    noneiv_conf_dict = json.load(conf_file)
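# The configuration files are plain JSON. Judging from the keys accessed
# further down in this script, an eiv_<data>.json is expected to look roughly
# like the following sketch (all values are placeholders, not taken from the
# repository; the non-EiV file has the same keys except fixed_std_x):
#     {
#         "long_dataname": "energy_efficiency",
#         "short_dataname": "energy",
#         "normalize": true,
#         "init_std_y_list": [0.5],
#         "unscaled_reg": 0.1,
#         "p": 0.2,
#         "hidden_layers": [128, 128, 128, 128],
#         "fixed_std_x": 0.05,
#         "seed_range": [0, 10]
#     }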
# assuming normalized data was used
try:
    assert eiv_conf_dict['normalize']
    assert noneiv_conf_dict['normalize']
except KeyError:
    pass
normalize = True
long_dataname = eiv_conf_dict["long_dataname"]
short_dataname = eiv_conf_dict["short_dataname"]
print(f"Evaluating {long_dataname}")
scale_outputs = False
load_data = importlib.import_module(f'EIVData.{long_dataname}').load_data
try:
    sigma_y = importlib.import_module(f'EIVData.{long_dataname}').y_noise_strength
    design_matrix = importlib.import_module(f'EIVData.{long_dataname}').design_matrix
except AttributeError:
    sigma_y = None
train_data, test_data = load_data(normalize=normalize)
input_dim = train_data[0][0].numel()
output_dim = train_data[0][1].numel()
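# Both splits are torch datasets whose items are (input, target) tensor pairs;
# a minimal sketch of how they are accessed (not part of the original draft):
#     x0, y0 = train_data[0]
#     x0.shape == (input_dim,) and y0.shape == (output_dim,)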
# try:
#     gpu_number = eiv_conf_dict["gpu_number"]
#     device = torch.device(f'cuda:{gpu_number}')
#     try:
#         torch.tensor([0.0]).to(device)
#     except RuntimeError:
#         if torch.cuda.is_available():
#             print('Switched to GPU 0')
#             device = torch.device('cuda:0')
#         else:
#             print('No cuda available, using CPU')
#             device = torch.device('cpu')
# except KeyError:
device = torch.device('cpu')
def collect_predictions(x, seed=0,
        noneiv_number_of_draws=100, eiv_number_of_draws=[100,5],
        device=device):
    x = x.to(device)

    # non-EiV
    init_std_y = noneiv_conf_dict["init_std_y_list"][0]
    unscaled_reg = noneiv_conf_dict["unscaled_reg"]
    p = noneiv_conf_dict["p"]
    hidden_layers = noneiv_conf_dict["hidden_layers"]
    saved_file = os.path.join('saved_networks',
            f'noneiv_{short_dataname}'
            f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'
            f'_p_{p:.2f}_seed_{seed}.pkl')
    net = Networks.FNNBer(p=p, init_std_y=init_std_y,
            h=[input_dim, *hidden_layers, output_dim]).to(device)
    # load network and extract std_y
    noneiv_std_y = train_and_store.open_stored_training(saved_file=saved_file,
            net=net, device=device)[3]
    # compute predictions and uncertainties
    noneiv_results = {}
    training_state = net.training
    net.train()
    not_averaged_predictions = net.predict(x,
            number_of_draws=noneiv_number_of_draws,
            take_average_of_prediction=False)
    noneiv_results['predictions'] = torch.mean(not_averaged_predictions[0], dim=1)
    noneiv_results['uncertainties'] = torch.std(not_averaged_predictions[0], dim=1)

    # EiV
    eiv_results = {}
    init_std_y = eiv_conf_dict["init_std_y_list"][0]
    unscaled_reg = eiv_conf_dict["unscaled_reg"]
    p = eiv_conf_dict["p"]
    hidden_layers = eiv_conf_dict["hidden_layers"]
    fixed_std_x = eiv_conf_dict["fixed_std_x"]
    saved_file = os.path.join('saved_networks',
            f'eiv_{short_dataname}'
            f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'
            f'_p_{p:.2f}_fixed_std_x_{fixed_std_x:.3f}'
            f'_seed_{seed}.pkl')
    net = Networks.FNNEIV(p=p, init_std_y=init_std_y,
            h=[input_dim, *hidden_layers, output_dim],
            fixed_std_x=fixed_std_x).to(device)
    # load network and extract std_y
    eiv_std_y = train_and_store.open_stored_training(saved_file=saved_file,
            net=net, device=device)[3]
    # compute predictions and uncertainties
    training_state = net.training
    noise_state = net.noise_is_on
    net.train()
    net.noise_on()
    not_averaged_predictions = net.predict(x, number_of_draws=eiv_number_of_draws,
            take_average_of_prediction=False)
    eiv_results['predictions'] = torch.mean(not_averaged_predictions[0], dim=1)
    eiv_results['uncertainties'] = torch.std(not_averaged_predictions[0], dim=1)

    results = {'eiv': eiv_results,
            'noneiv': noneiv_results}
    return results
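# Usage sketch for collect_predictions (assuming x has shape (N, input_dim);
# this example is not part of the original draft):
#     res = collect_predictions(x, seed=0)
#     res['noneiv']['predictions']    # shape (N, output_dim), mean over MC draws
#     res['noneiv']['uncertainties']  # shape (N, output_dim), std over MC draws
#     res['eiv']['predictions'], res['eiv']['uncertainties']  # same, EiV net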
def create_diagonal(train, number_of_steps=100):
    input_shape = train[0][0].shape
    assert len(input_shape) == 1
    input_dim = input_shape[0]
    ones = torch.ones((1, input_dim))
    t = torch.linspace(start=0, end=1, steps=number_of_steps)[..., None]
    return (1-t) * ones - t * ones
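# Worked example of create_diagonal (not part of the original draft): for
# number_of_steps=3 the rows of the returned tensor would be
#     [1, ..., 1], [0, ..., 0], [-1, ..., -1],
# i.e. a straight line from +ones (t=0) through the origin to -ones (t=1)
# along the diagonal of the normalized input space.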
assert noneiv_conf_dict["seed_range"] == eiv_conf_dict["seed_range"]
seed_list = range(noneiv_conf_dict["seed_range"][0],
        noneiv_conf_dict["seed_range"][1])
noneiv_predictions = 0
noneiv_uncertainties = 0
eiv_predictions = 0
eiv_uncertainties = 0
number_of_seeds = len(seed_list)
out_dim = 0
number_of_steps = 100
for seed in tqdm(seed_list):
    x_diagonal = create_diagonal(train=train_data, number_of_steps=number_of_steps)
    results = collect_predictions(x_diagonal,
            seed=seed)
    noneiv_predictions += 1/number_of_seeds * results['noneiv']['predictions'][..., out_dim]
    noneiv_uncertainties += 1/number_of_seeds * results['noneiv']['uncertainties'][..., out_dim]
    eiv_predictions += 1/number_of_seeds * results['eiv']['predictions'][..., out_dim]
    eiv_uncertainties += 1/number_of_seeds * results['eiv']['uncertainties'][..., out_dim]
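# After the loop, noneiv_predictions / eiv_predictions hold the prediction for
# output dimension out_dim averaged over all seeds, and the corresponding
# *_uncertainties the seed-averaged standard deviation of the MC draws; all
# four are 1-d tensors of length number_of_steps, plotted against plot_x below.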
plt.figure(1)
plt.clf()
plot_x = torch.linspace(0,1, steps=number_of_steps)
plt.plot(plot_x, noneiv_predictions, color='b')
plt.fill_between(plot_x, noneiv_predictions - noneiv_uncertainties,
        noneiv_predictions + noneiv_uncertainties, color='b', alpha=0.5)
plt.plot(plot_x, eiv_predictions, color='r')
plt.fill_between(plot_x, eiv_predictions - eiv_uncertainties,
        eiv_predictions + eiv_uncertainties, color='r', alpha=0.5)
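# The draft ends without saving or displaying the figure. A minimal sketch of
# how it could be finished; the axis labels, the 'results' folder and the file
# name are assumptions, not part of the original script:
plt.xlabel('t (position along the input diagonal)')
plt.ylabel(f'predicted output (dimension {out_dim})')
os.makedirs('results', exist_ok=True)
plt.savefig(os.path.join('results', f'intersection_plot_{short_dataname}.pdf'))
plt.show()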