import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.utils import data
from sklearn.model_selection import train_test_split
import json
import time
# Alias torchtext's `data` module so it does not shadow torch.utils.data,
# which the Dataset class below subclasses.
from torchtext import data as torchtext_data, datasets, vocab
from argparse import ArgumentParser
from torch.utils.tensorboard import SummaryWriter

import random, math, sys, gzip
import tqdm
from numpy.random import seed
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
import nlp
class Dataset(data.Dataset):
    """Simple in-memory dataset wrapping parallel lists of inputs and targets."""
    def __init__(self, inputs, targets):
        assert len(inputs) == len(targets), 'inputs and targets must have the same length.'
        self.inputs = inputs
        self.targets = targets

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, index):
        return self.inputs[index], self.targets[index]

def create_datasets(arg):
    """Dispatch dataset creation based on the task name in `arg`."""
    if arg.task == 'TOTTO':
        return create_totto_datasets(arg)
    elif arg.task == 'cnn_dailymail':
        return create_cnn_dailymail_datasets(arg)
    else:
        raise ValueError(f'Data fetching for {arg.task} not defined')

# For ToTTo, reference sentences are not available for the test set,
# so we split the training set into train and validation and use the
# official validation set as our test set.
def read_file(arg, f):
    """Read a ToTTo jsonl file and return parallel lists of inputs and targets."""
    with open(f) as fp:
        lines = fp.readlines()
    inputs = []
    targets = []
    for line in lines:
        entry = json.loads(line)
        if arg.input_string == 'raw_input':
            # Use the full (annotation-stripped) JSON record as the input string.
            target = entry['sentence_annotations'][0]['final_sentence']
            entry.pop('sentence_annotations')
            inp = json.dumps(entry)
        elif arg.input_string == 'subtable_str_plus_subtable_metadata_str':
            # Use the linearized subtable plus its metadata as the input string.
            target = entry['sentence_annotations'][0]['final_sentence']
            inp = entry['subtable_metadata_str'] + ' ' + entry['subtable_str']
        else:
            raise ValueError(f'Unknown input_string option: {arg.input_string}')
        inputs.append(inp)
        targets.append(target)
    return inputs, targets

def create_totto_datasets(arg):
    """Build train/validation/test Dataset objects from the ToTTo jsonl files."""
    start = time.time()
    if arg.toy_dataset:
        print('Using toy dataset..')

    inputs, targets = read_file(arg, arg.train_input)
    if arg.toy_dataset:
        inputs = inputs[:arg.toy_dataset]
        targets = targets[:arg.toy_dataset]

    num_datapoints = len(inputs)
    val_set_size = max(1, int(0.125 * num_datapoints))
    # Split inputs and targets in a single call so they stay aligned.
    train, val, train_t, val_t = train_test_split(
        inputs, targets, test_size=val_set_size, shuffle=False)

    training_set = Dataset(train, train_t)
    validation_set = Dataset(val, val_t)
    assert len(validation_set) == val_set_size, 'Validation size not matching'
    assert len(training_set) == (num_datapoints - val_set_size), 'Training size not matching'

    # The official ToTTo development set serves as our test set.
    inputs, targets = read_file(arg, arg.development_input)
    if arg.toy_dataset:
        inputs = inputs[:arg.toy_dataset]
        targets = targets[:arg.toy_dataset]
    test_set = Dataset(inputs, targets)
    end = time.time()
    print('Time taken to create datasets is %0.2f mins' % ((end - start) / 60))
    return training_set, validation_set, test_set

def create_cnn_dailymail_datasets(arg):
    """Build train/validation/test Dataset objects from 1% slices of CNN/DailyMail 3.0.0."""
    def convert(dataset, arg):
        inputs = []
        targets = []
        for i in range(len(dataset)):
            inputs.append(dataset[i]['article'])
            targets.append(dataset[i]['highlights'])
        if arg.toy_dataset:
            inputs = inputs[:arg.toy_dataset]
            targets = targets[:arg.toy_dataset]
        return Dataset(inputs, targets)

    # Only the first 1% of each split is loaded to keep experiments fast.
    train_dataset = nlp.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
    val_dataset = nlp.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")
    test_dataset = nlp.load_dataset("cnn_dailymail", "3.0.0", split="test[:1%]")

    training_set = convert(train_dataset, arg)
    validation_set = convert(val_dataset, arg)
    test_set = convert(test_dataset, arg)
    return training_set, validation_set, test_set

def create_dataloaders(arg, training_set, validation_set, test_set):
    """Wrap the datasets in DataLoaders; any dataset passed as None is skipped."""
    trainloader, valloader, testloader = None, None, None
    if training_set is not None:
        trainloader = torch.utils.data.DataLoader(
            training_set, batch_size=arg.batch_size, shuffle=arg.shuffle_train, num_workers=2)
    if validation_set is not None:
        valloader = torch.utils.data.DataLoader(
            validation_set, batch_size=arg.batch_size, shuffle=False, num_workers=2)
    if test_set is not None:
        testloader = torch.utils.data.DataLoader(
            test_set, batch_size=arg.batch_size, shuffle=False, num_workers=2)
    return trainloader, valloader, testloader
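
# --- Usage sketch (illustrative; not part of the original commit) ---
# A minimal example of how these helpers could be wired together, assuming an
# argparse namespace exposing the fields referenced above (task, input_string,
# train_input, development_input, toy_dataset, batch_size, shuffle_train).
# The actual training script may define these flags differently.
if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--task', default='cnn_dailymail')
    parser.add_argument('--input_string', default='subtable_str_plus_subtable_metadata_str')
    parser.add_argument('--train_input', default=None)
    parser.add_argument('--development_input', default=None)
    parser.add_argument('--toy_dataset', type=int, default=0)
    parser.add_argument('--batch_size', type=int, default=8)
    parser.add_argument('--shuffle_train', action='store_true')
    arg = parser.parse_args()

    training_set, validation_set, test_set = create_datasets(arg)
    trainloader, valloader, testloader = create_dataloaders(
        arg, training_set, validation_set, test_set)
    print('train/val/test sizes: %d/%d/%d' %
          (len(training_set), len(validation_set), len(test_set)))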