# million_song.py — authored by Jörg Martin
import torch
from EIVData.csv_dataset import CSVData
from torch.utils.data import random_split
def load_data(seed=0, splitting_part=0.8, normalize=True,
        dataset_path='~/SharedData/AI/datasets/year_prediction_MSD/YearPredictionMSD.txt'):
    """
    Loads the million song (YearPredictionMSD) dataset and returns a
    reproducible train/test split.
    :param seed: Seed for splitting and shuffling the data.
        Defaults to 0.
    :param splitting_part: Which fraction of the data to use as training
        data. Defaults to 0.8.
    :param normalize: Whether to normalize the data, defaults to True.
    :param dataset_path: Location of the YearPredictionMSD CSV file.
        Defaults to the shared-data path used originally.
    :returns: million_trainset, million_testset
    """
    # Column 0 of the CSV holds the target (release year); no header row.
    msd_dataset = CSVData(dataset_path,
            class_name=0,
            shuffle_seed=seed,
            header=None,
            delimiter=',',
            normalize=normalize)
    dataset_len = len(msd_dataset)
    train_len = int(dataset_len * splitting_part)
    # Give the remainder to the test set so the lengths always sum exactly.
    test_len = dataset_len - train_len
    # A seeded generator makes the split reproducible for a given seed.
    msd_trainset, msd_testset = random_split(
            msd_dataset,
            lengths=[train_len, test_len],
            generator=torch.Generator().manual_seed(seed))
    return msd_trainset, msd_testset