Commit 1feac39b authored by Jörg Martin
coverage_plotting renamed to coverage_collect

parent 164c9ae5
"""
Get the numerical vs. the theoretical coverage for different coverage factors
and plot them. This module contains
- the function `get_coverages`, which returns two numpy arrays containing
  the numerical and the theoretical coverage,
- the function `get_coverage_distribution`, which runs through several
  networks and collects the results of `get_coverages`,
- the function `plot_coverages`, which plots the results of
  `get_coverage_distribution`.
Collect coverages for various coverage factors, networks and dataloaders.
"""
import importlib
import os
import argparse
import json
import numpy as np
import torch
import torch.backends.cudnn
from torch.utils.data import DataLoader
from tqdm import tqdm
from EIVArchitectures import Networks
from EIVTrainingRoutines import train_and_store
from EIVGeneral.coverage_metrics import epistemic_coverage, normalized_std
from EIVData.repeated_sampling import repeated_sampling
import matplotlib.pyplot as plt
def get_coverages(not_averaged_predictions, y,\
q_range=np.linspace(0.1,0.9,num=30)):
@@ -96,85 +75,10 @@ def get_coverage_distribution(net_iterator, dataloader_iterator,
y_collection = torch.concat(y_collection, dim=0)
numerical_coverage, theoretical_coverage = get_coverages(
not_averaged_predictions=not_av_pred_collection,
-y=y_collection)
+y=y_collection, q_range=q_range)
num_coverage_collection.append(numerical_coverage)
th_coverage_collection.append(theoretical_coverage)
if stack:
num_coverage_collection = np.stack(num_coverage_collection, axis=-1)
th_coverage_collection = np.stack(th_coverage_collection, axis=-1)
return num_coverage_collection, th_coverage_collection
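# Minimal self-contained sketch (illustration only, not part of the EIV
# code base; assumes SciPy is available): since the tensor layout expected
# by `get_coverages` is not shown in this hunk, the numerical-vs-theoretical
# comparison is recomputed from scratch for a toy, perfectly calibrated
# Gaussian predictor.
if __name__ == '__main__':
    from scipy.stats import norm
    rng = np.random.default_rng(0)
    y_toy = rng.normal(size=10000)       # toy ground truth
    pred_mean, pred_std = 0.0, 1.0       # calibrated Gaussian predictor
    for q in np.linspace(0.1, 0.9, num=5):
        # coverage factor k such that N(0,1) has mass q inside [-k, k]
        k = norm.ppf(0.5 + q / 2)
        numerical = np.mean(np.abs(y_toy - pred_mean) <= k * pred_std)
        print(f'q={q:.2f}: theoretical={q:.2f}, numerical={numerical:.2f}')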
#######
# data = 'linear'
# # load hyperparameters from JSON file
# with open(os.path.join('/home/martin09/san/Projects/journal_eiv/Experiments/configurations',f'eiv_{data}.json'),'r') as conf_file:
# eiv_conf_dict = json.load(conf_file)
# with open(os.path.join('/home/martin09/san/Projects/journal_eiv/Experiments/configurations',f'noneiv_{data}.json'),'r') as conf_file:
# noneiv_conf_dict = json.load(conf_file)
# seed = 0
# long_dataname = eiv_conf_dict["long_dataname"]
# short_dataname = eiv_conf_dict["short_dataname"]
# load_data = importlib.import_module(f'EIVData.{long_dataname}').load_data
# train, test = load_data()
# test_dataloader = DataLoader(test, batch_size=1000)
# device = torch.device('cuda:0')
# x,y = next(iter(test_dataloader))
# if len(y.shape) <= 1:
# y = y.view((-1,1))
# if len(x.shape) <= 1:
# x = x.view((-1,1))
# x,y = x.to(device), y.to(device)
# input_dim = x.shape[1]
# output_dim = y.shape[1]
# init_std_y = noneiv_conf_dict["init_std_y_list"][0]
# unscaled_reg = noneiv_conf_dict["unscaled_reg"]
# p = noneiv_conf_dict["p"]
# hidden_layers = noneiv_conf_dict["hidden_layers"]
# saved_file = os.path.join('saved_networks',
# f'noneiv_{short_dataname}'\
# f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
# f'_p_{p:.2f}_seed_{seed}.pkl')
# noneiv_net = Networks.FNNBer(p=p, init_std_y=init_std_y,
# h=[input_dim, *hidden_layers, output_dim]).to(device)
# train_and_store.open_stored_training(saved_file=saved_file,
# net=noneiv_net, device=device)
# # EiV
# init_std_y = eiv_conf_dict["init_std_y_list"][0]
# unscaled_reg = eiv_conf_dict["unscaled_reg"]
# p = eiv_conf_dict["p"]
# hidden_layers = eiv_conf_dict["hidden_layers"]
# fixed_std_x = eiv_conf_dict["fixed_std_x"]
# saved_file = os.path.join('saved_networks',
# f'eiv_{short_dataname}'\
# f'_init_std_y_{init_std_y:.3f}_ureg_{unscaled_reg:.1f}'\
# f'_p_{p:.2f}_fixed_std_x_{fixed_std_x:.3f}'\
# f'_seed_{seed}.pkl')
# eiv_net = Networks.FNNEIV(p=p, init_std_y=init_std_y,
# h=[input_dim, *hidden_layers, output_dim],
# fixed_std_x=fixed_std_x).to(device)
# def eiv_net_iterator(seed_range=range(0,10)):
# train_and_store.open_stored_training(saved_file=saved_file,
# net=eiv_net, device=device)
# noneiv_not_averaged_predictions = noneiv_net.predict(x,\
# number_of_draws=100,
# take_average_of_prediction=False)
# noneiv_num_cov, noneiv_th_cov = get_coverages(noneiv_not_averaged_predictions, y,\
# q_range=np.linspace(0.01,0.99,num=30))
# eiv_not_averaged_predictions = eiv_net.predict(x,\
# number_of_draws=[100,5],
# take_average_of_prediction=False)
# eiv_num_cov, eiv_th_cov = get_coverages(eiv_not_averaged_predictions, y,\
# q_range=np.linspace(0.01,0.99,num=30))
# lin_x = np.linspace(np.min(noneiv_th_cov), np.max(noneiv_th_cov))
# plt.plot(lin_x, lin_x)
# plt.plot(noneiv_th_cov, noneiv_num_cov)
# plt.plot(eiv_th_cov, eiv_num_cov)
@@ -4,29 +4,22 @@ Results will be stored in the results folder
"""
import importlib
import os
import argparse
import json
import numpy as np
import torch
import torch.backends.cudnn
from torch.utils.data import DataLoader
from tqdm import tqdm
import matplotlib.pyplot as plt
from EIVArchitectures import Networks
from EIVTrainingRoutines import train_and_store
-from EIVGeneral.coverage_plotting import get_coverage_distribution
+from EIVGeneral.coverage_collect import get_coverage_distribution
from EIVGeneral.manipulate_datasets import VerticalCut
# read in data via --data option
parser = argparse.ArgumentParser()
parser.add_argument("--data", help="Loads data", default='naval')
parser.add_argument("--no-autoindent", help="",
action="store_true") # to avoid conflics in IPython
args = parser.parse_args()
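# Example invocation (hypothetical script name; `--data linear` makes the
# script read configurations/eiv_linear.json):
#   python coverage_script.py --data linear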
data = args.data
data = 'linear'
# load hyperparameters from JSON file
with open(os.path.join('configurations',f'eiv_{data}.json'),'r') as conf_file:
@@ -114,7 +107,7 @@ def net_iterator(eiv=True, seed_list=seed_list):
# dataloaders
def dataloader_iterator(seed_list=seed_list, use_ground_truth=False,
-batch_size = 1000):
+batch_size = 100):
for seed in seed_list:
if not use_ground_truth:
train_data, test_data = load_data(seed=seed)
......