import torch
import tqdm
import numpy as np
import evaluate
import json
import random
import math
from tqdm.auto import tqdm
from transformers import (BertTokenizer, RobertaTokenizer, BertModel, RobertaModel,
                          RobertaPreTrainedModel, RobertaConfig, BertConfig, BertPreTrainedModel,
                          PreTrainedModel, AutoModel, AutoTokenizer, AutoConfig)
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from transformers import AdamW, get_scheduler
from torch import nn
from torch.nn import CrossEntropyLoss
import matplotlib.pyplot as plt
import os
import pandas as pd
import sklearn
#metric=evaluate.load("accuracy")
torch.cuda.empty_cache()
def reposition(dp, old_dataset=False):
"""Reposition function to find the character level indices of the metonymy (to map back in tokenizer_new
dp -> json readin of li et al shaped dataset(or original dataset)
old_dataset: bool -> Wheter or not Li et al Datasets are used (True:no, False:yes)
    returns:
    new_start -> int: new start position of the metonymy on character level (including whitespaces)
    new_end -> int: new end position of the metonymy on character level (including whitespaces)"""
    new_start = 0
    new_end = 0
    if old_dataset == False:
        new_dp = " ".join(dp["sentence"]).lower()
        if dp["pos"][0] == 0:
            new_start = len(" ".join(dp["sentence"][:dp["pos"][0]]))
        else:
            new_start = len(" ".join(dp["sentence"][:dp["pos"][0]])) + 1
        new_end = len(" ".join(dp["sentence"][dp["pos"][0]:dp["pos"][1]])) + new_start
        assert new_dp[new_start:new_end] == " ".join(dp["sentence"][dp["pos"][0]:dp["pos"][1]]).lower()
    elif old_dataset == True:
        new_dp = " ".join(dp["sentence"][1]).lower()
        if dp["pos"][0] == 0:
            new_start = len(" ".join(dp["sentence"][1][:dp["pos"][0]]))
        else:
            new_start = len(" ".join(dp["sentence"][1][:dp["pos"][0]])) + 1
        new_end = len(" ".join(dp["sentence"][1][dp["pos"][0]:dp["pos"][1]])) + new_start
        assert new_dp[new_start:new_end] == " ".join(dp["sentence"][1][dp["pos"][0]:dp["pos"][1]]).lower()
    return new_start, new_end
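
# Illustrative sketch (not part of the original pipeline): reposition() turns the word-level span
# stored in dp["pos"] into character offsets into the whitespace-joined, lowercased sentence.
# The datapoint below is a made-up example in the Li et al. shape.
def _demo_reposition():
    dp = {"sentence": ["He", "visited", "Germany", "last", "year"], "pos": [2, 3], "label": 1}
    start, end = reposition(dp, old_dataset=False)
    assert " ".join(dp["sentence"]).lower()[start:end] == "germany"
    return start, end
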
def tokenizer_new(tokenizer, input, max_length, masked=False, old_dataset=False, context=None):
""" Tokenizing function to tokenize a li et al shaped dataset (list of dictionaries) to create inputs for BERt/RoBERTa
1. encode the input and find the metonymy tokens (via reposition function)
2. map the tokens back to strings to verify the correctnes of the token indices
3. add all attention masks, tokentype ids (only for BERT), input ids, labels, new start positions, new end positions
to dataset
params:
tokenizer -> AutoTokenizer.from_pretrained('tokenizer_name'): BERT/RoBERTa tokenizer
input -> dataset to tokenize (json read in)
max_length -> int:max length to pad
masked -> bool:mask metonymies?
context -> left right or balaces: add context from left or right? fill up on other side if not enough (left or right)
"""
    all_start_positions = []
    all_end_positions = []
    all_labels = []
    all_attention_masks = []
    all_token_type_ids = []
    all_input_ids = []
    for dp in input:
        if masked == True:
            # mask the target span in place (one "<mask>" per token, so the word positions stay valid)
            if old_dataset == False:
                dp["sentence"][dp["pos"][0]:dp["pos"][1]] = ["<mask>"] * (dp["pos"][1] - dp["pos"][0])
            else:
                dp["sentence"][1][dp["pos"][0]:dp["pos"][1]] = ["<mask>"] * (dp["pos"][1] - dp["pos"][0])
        if old_dataset == False:
            # Li et al. dataset: reposition positions on character level, encode the sentence and extract the target
            new_start_pos, new_end_pos = reposition(dp, old_dataset=False)
            new_dp = " ".join(dp["sentence"]).lower()
            encoded_inp = tokenizer.encode_plus(new_dp, add_special_tokens=True, max_length=max_length, padding="max_length", truncation=True)
            old_target = "".join(dp["sentence"][dp["pos"][0]:dp["pos"][1]]).lower()
        else:
            # old dataset (original Markert SemEval): reposition and add context
            new_start_pos, new_end_pos = reposition(dp, old_dataset=True)
            new_dp = " ".join(dp["sentence"][1]).lower()
            encoded_inp = tokenizer.encode_plus(new_dp, add_special_tokens=True)  # don't add max_length and padding here so we can do it manually
            length_metonymies = len(encoded_inp["input_ids"])
            context_len = max_length - length_metonymies  # number of context tokens we can add; we add from left and right
            inp_before = " ".join(dp["sentence"][0]).lower()
            encoded_inp_before = tokenizer.encode_plus(inp_before, add_special_tokens=True)  # encode before and after context
            inp_after = " ".join(dp["sentence"][2]).lower()
            encoded_inp_after = tokenizer.encode_plus(inp_after, add_special_tokens=True)
            # prepare input for the new dictionary with context
            context_input_ids = []
            context_attention_masks = []
            if tokenizer.name_or_path[0] == "b":  # BERT tokenizer has token type ids too
                context_token_type_ids = []
            length_before = len(encoded_inp_before["input_ids"])
            length_after = len(encoded_inp_after["input_ids"])
            if length_before >= context_len/2 and length_after >= context_len/2:
                index_before = int(context_len/2)
                index_after = int(context_len/2)
            elif length_before < context_len/2 and length_after >= context_len/2:
                index_before = length_before
                difference_before = (context_len/2) - length_before
                wanted_from_after = (context_len/2) + difference_before
                if wanted_from_after >= length_after:
                    index_after = length_after
                else:
                    index_after = int(math.ceil(wanted_from_after))
            elif length_after < context_len/2 and length_before >= context_len/2:
                index_after = length_after
                difference_after = (context_len/2) - length_after
                wanted_from_before = (context_len/2) + difference_after
                if wanted_from_before >= length_before:
                    index_before = length_before
                else:
                    index_before = int(math.ceil(wanted_from_before))
            elif length_before < context_len/2 and length_after < context_len/2:
                index_before = length_before
                index_after = length_after
            context_input_ids = context_input_ids + encoded_inp_before["input_ids"][length_before-index_before:length_before]
            context_input_ids = context_input_ids + encoded_inp["input_ids"]
            context_input_ids = context_input_ids + encoded_inp_after["input_ids"][length_after-index_after:length_after]
            context_input_ids = context_input_ids + ([0] * (512 - len(context_input_ids)))  # pad to 512
            context_attention_masks = context_attention_masks + encoded_inp_before["attention_mask"][length_before-index_before:length_before]
            context_attention_masks = context_attention_masks + encoded_inp["attention_mask"]
            context_attention_masks = context_attention_masks + encoded_inp_after["attention_mask"][length_after-index_after:length_after]
            context_attention_masks = context_attention_masks + ([0] * (512 - len(context_attention_masks)))  # pad to 512
            if tokenizer.name_or_path[0] == "b":  # BERT tokenizer has token type ids too
                context_token_type_ids = context_token_type_ids + encoded_inp_before["token_type_ids"][length_before-index_before:length_before]
                context_token_type_ids = context_token_type_ids + encoded_inp["token_type_ids"]
                context_token_type_ids = context_token_type_ids + encoded_inp_after["token_type_ids"][length_after-index_after:length_after]
                context_token_type_ids = context_token_type_ids + ([0] * (512 - len(context_token_type_ids)))
                assert len(context_token_type_ids) == 512
            # make sure we pad to the maximum
            assert len(context_input_ids) == 512 and len(context_attention_masks) == 512
            # get tokenized words for the before sentence and the metonymy sentence
            tokenized_before = []
            for i in range(len(" ".join(dp["sentence"][0]).lower())):
                tokenized_before.append(encoded_inp.char_to_token(i, sequence_index=0))
            old_target = "".join(dp["sentence"][1][dp["pos"][0]:dp["pos"][1]]).lower()
        tokenized_words = []
        for i in range(len(new_dp)):
            tokenized_words.append(encoded_inp.char_to_token(i, sequence_index=0))
        span = []
        for i in tokenized_words[new_start_pos:new_end_pos]:
            if i is not None:
                if old_dataset == True:
                    span.append(i + index_before)
                else:
                    span.append(i)
        indices_to_tokens = list(set(span))
        indices_to_tokens.sort()
        # decode the repositioned tokens to check for a wrong mapping
        if old_dataset == False:
            if len(indices_to_tokens) == 1:
                decoded = "".join(tokenizer.decode(encoded_inp["input_ids"][indices_to_tokens[0]])).strip().replace(" ", "")
            else:
                decoded = "".join(tokenizer.decode(encoded_inp["input_ids"][indices_to_tokens[0]:indices_to_tokens[-1]+1])).strip().replace(" ", "")
        else:
            if len(indices_to_tokens) == 1:
                decoded = "".join(tokenizer.decode(context_input_ids[indices_to_tokens[0]])).strip().replace(" ", "")
            else:
                decoded = "".join(tokenizer.decode(context_input_ids[indices_to_tokens[0]:indices_to_tokens[-1]+1])).strip().replace(" ", "")
        if old_target != decoded:
            print("wrong mapping")
            continue
        all_start_positions.append(indices_to_tokens[0])
        all_end_positions.append(indices_to_tokens[-1] + 1)
        all_labels.append(dp["label"])
        if old_dataset == False:
            all_input_ids.append(encoded_inp["input_ids"])
            all_attention_masks.append(encoded_inp["attention_mask"])
        else:
            all_input_ids.append(context_input_ids)
            all_attention_masks.append(context_attention_masks)
        if tokenizer.name_or_path[0] == "b":
            if old_dataset == False:
                all_token_type_ids.append(encoded_inp["token_type_ids"])
            else:
                all_token_type_ids.append(context_token_type_ids)
print("roberta tokenizer")
dataset=TensorDataset(torch.tensor(all_input_ids, dtype=torch.long).to("cuda") ,
torch.tensor(all_attention_masks, dtype=torch.long).to("cuda") ,
torch.tensor(all_start_positions,dtype=torch.long).to("cuda"),
torch.tensor(all_end_positions, dtype=torch.long).to("cuda"),
torch.tensor(all_labels,dtype=torch.long).to("cuda"))
if tokenizer.name_or_path[0] =="b":
print("bert tokenizer")
dataset=TensorDataset(torch.tensor(all_input_ids, dtype=torch.long).to("cuda"),
torch.tensor(all_attention_masks, dtype=torch.long).to("cuda"),
torch.tensor(all_token_type_ids, dtype=torch.long).to("cuda"),
torch.tensor(all_start_positions,dtype=torch.long).to("cuda"),
torch.tensor(all_end_positions, dtype=torch.long).to("cuda"),
torch.tensor(all_labels,dtype=torch.long).to("cuda"))
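
# Usage sketch (assumption, not from the original script): tokenizer_new() takes a list of
# Li et al.-shaped dicts plus a *fast* HuggingFace tokenizer and returns a TensorDataset of
# input ids, attention masks, (token type ids for BERT,) target start/end token positions and
# labels, already moved to the GPU. The file name and max_length below are hypothetical examples.
# tokenizer = AutoTokenizer.from_pretrained("roberta-base")
# with open("train.json") as f:
#     train = json.load(f)
# train_dataset = tokenizer_new(tokenizer, train, max_length=150)
# train_loader = DataLoader(train_dataset, sampler=RandomSampler(train_dataset), batch_size=16)
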
def tokenizer_imdb(tokenizer, dataset, max_length):
"""Tokenizer for imdb dataset (for validation of our tmix implementation.
Params:
tokenizer: AutoTokenizer -> Tokenizer (in out case BERT base uncased)
dataset: list of dicts -> dataset (imdb from huggingface) to be preprocessed
max_length: int -> maximum length for padding/truncation"""
    all_input_ids = []
    all_attention_masks = []
    all_token_type_ids = []
    all_labels = []
    for dp in dataset:
        encoded_inp = tokenizer.encode_plus(dp["text"], add_special_tokens=True, max_length=max_length, truncation=True, padding="max_length")
        all_labels.append(dp["label"])
        all_input_ids.append(encoded_inp["input_ids"])
        all_attention_masks.append(encoded_inp["attention_mask"])
        all_token_type_ids.append(encoded_inp["token_type_ids"])
    dataset = TensorDataset(torch.tensor(all_input_ids, dtype=torch.long).to("cuda"),
                            torch.tensor(all_attention_masks, dtype=torch.long).to("cuda"),
                            torch.tensor(all_token_type_ids, dtype=torch.long).to("cuda"),
                            torch.tensor(all_labels, dtype=torch.long).to("cuda"))
    print("created imdb dataset")
    return dataset
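
# Usage sketch (assumption): the split is expected as a list of {"text": ..., "label": ...} dicts,
# e.g. loaded via the HuggingFace datasets library (not imported above).
# from datasets import load_dataset
# imdb_train = load_dataset("imdb", split="train")
# bert_tok = BertTokenizer.from_pretrained("bert-base-uncased")
# imdb_dataset = tokenizer_imdb(bert_tok, imdb_train, max_length=256)
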
class EncodedTokenDataset(torch.utils.data.Dataset):
"""
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
A dataset, containing encoded sentences, integer labels and
the starting and ending position of the target word.
"""
    def __init__(self, encodings, starts, ends, labels, instances):
        self.encodings = encodings
        self.labels = labels
        self.starts = starts
        self.ends = ends
        self.instances = instances

    def __getitem__(self, idx):
        item = {k: torch.tensor(v[idx]) for k, v in self.encodings.items()}
        if self.labels:
            item["labels"] = torch.tensor([self.labels[idx]])
        item["start_position"] = torch.tensor([self.starts[idx]])
        item["end_position"] = torch.tensor([self.ends[idx]])
        return item

    def __len__(self) -> int:
        return len(self.instances)
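
# Illustrative sketch (hypothetical toy inputs, not part of the original code): items produced by
# EncodedTokenDataset are plain dicts of tensors, so a DataLoader with the default collate_fn can
# batch them directly.
def _demo_encoded_token_dataset():
    encodings = {"input_ids": [[101, 2054, 102]], "attention_mask": [[1, 1, 1]]}
    ds = EncodedTokenDataset(encodings, starts=[1], ends=[2], labels=[0], instances=["what"])
    item = ds[0]  # keys: input_ids, attention_mask, labels, start_position, end_position
    return item
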
def salami_tokenizer(tokenizer, input, max_length, masked=False):
"""Salami tokenizer for input sentences (Used together with EncodedTokenDataset)"""
    bots_token, eots_token = "[bots]", "[eots]"
    tokenizer.add_tokens([bots_token, eots_token])
    bots_id, eots_id = tokenizer.convert_tokens_to_ids(
        [bots_token, eots_token]
    )  # both are of type int
    instances = []
    all_labels = []
    all_input_ids = []
    all_attention_masks = []
    if tokenizer.name_or_path[0] == "b":
        all_token_type_ids = []
    for dp in input:
        if masked == True:
            # mask the target span in place (one "<mask>" per token, so the positions stay valid)
            dp["sentence"][dp["pos"][0]:dp["pos"][1]] = ["<mask>"] * (dp["pos"][1] - dp["pos"][0])
        new_sentence = " ".join(dp["sentence"][:dp["pos"][0]]) + bots_token + " " + " ".join(dp["sentence"][dp["pos"][0]:dp["pos"][1]]) + " " + eots_token + " ".join(dp["sentence"][dp["pos"][1]:])
        #print(new_sentence)
        instances.append(new_sentence)
        all_labels.append(dp["label"])
    #print("number of instances: ", len(instances))
    encoded_inp = tokenizer(instances, padding=True, max_length=max_length, return_tensors="pt")
    start_pos = (encoded_inp["input_ids"] == bots_id).nonzero()[:, 1]
    end_pos = (encoded_inp["input_ids"] == eots_id).nonzero()[:, 1] - 1
    #print("start_pos: ", start_pos)
    #print("end_pos: ", end_pos)
    # remove the [bots]/[eots] marker tokens again so only the derived positions remain
    for input_info_name in encoded_inp.keys():
        input_information = []
        for i, t in enumerate(encoded_inp[input_info_name]):
            t = t.tolist()
            t.pop(start_pos[i].item())
            t.pop(end_pos[i].item())
            input_information.append(t)
        encoded_inp[input_info_name] = torch.tensor(input_information)
    #start_pos=start_pos.tolist()
    #end_pos=end_pos.tolist()
    return EncodedTokenDataset(encoded_inp, start_pos, end_pos, all_labels, instances)
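
# Usage sketch (assumption, not from the original script): build an EncodedTokenDataset from a
# Li et al.-shaped json split; the file name is hypothetical.
# tok = AutoTokenizer.from_pretrained("roberta-base")
# with open("dev.json") as f:
#     dev = json.load(f)
# dev_dataset = salami_tokenizer(tok, dev, max_length=512)
# dev_loader = DataLoader(dev_dataset, sampler=SequentialSampler(dev_dataset), batch_size=16)
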
def tokenizer_li(input, max_length, masked=False):
    """Tokenizer following the Li et al. preprocessing: encodes each sentence with bert-base-uncased,
    maps the word-level target positions to WordPiece positions to verify the mapping,
    pads everything manually to max_length and returns a TensorDataset."""
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    all_input_ids = []
    all_attention_masks = []
    all_token_type_ids = []
    all_start_positions = []
    all_end_positions = []
    all_labels = []
    for dp in input:
        if masked == True:
            # mask the target span in place (one "<mask>" per token, so the positions stay valid)
            dp["sentence"][dp["pos"][0]:dp["pos"][1]] = ["<mask>"] * (dp["pos"][1] - dp["pos"][0])
        new_dp = " ".join(dp["sentence"]).lower()
        encoded_inp = tokenizer.encode_plus(new_dp, add_special_tokens=True, max_length=max_length, padding=True)  # encode the input with the BertTokenizer
        tf_tokens = tokenizer.convert_ids_to_tokens(encoded_inp["input_ids"])
        # map every word of the original sentence to the index of its first WordPiece token
        orig_to_tok_index2 = []
        all_tokens2 = ['[CLS]']
        for (i, token) in enumerate(dp["sentence"]):
            orig_to_tok_index2.append(len(all_tokens2))
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                all_tokens2.append(sub_token)
        orig_to_tok_index2.append(len(all_tokens2))
        if len(tf_tokens) > max_length:
            print("too long")
            continue
        old_target = "".join(dp["sentence"][dp["pos"][0]:dp["pos"][1]]).lower()
        #print("old target: ", old_target)
        new_target = "".join(tf_tokens[orig_to_tok_index2[dp["pos"][0]]:orig_to_tok_index2[dp["pos"][1]]]).replace("##", "").lower()
        #print("new target: ", new_target)
        if old_target != new_target:
            print("wrong mapping")  # check the mapping of the positions
            print(old_target)
            print(new_target)
            continue
        assert len(encoded_inp["input_ids"]) == len(encoded_inp["attention_mask"])  # by default the attention mask is all 1s (as in the Li et al. implementation)
        # pad the attention masks, input ids and token type ids up to max_length:
        padding_length = max_length - len(encoded_inp["input_ids"])
        input_ids = encoded_inp["input_ids"] + ([0] * padding_length)
        attention_mask = encoded_inp["attention_mask"] + ([0] * padding_length)
        token_type_ids = encoded_inp["token_type_ids"] + ([0] * padding_length)
        # add the infos to the lists
        all_input_ids.append(input_ids)
        all_attention_masks.append(attention_mask)
        all_token_type_ids.append(token_type_ids)
        all_start_positions.append(dp["pos"][0])  # do we not have to update the positions?
        all_end_positions.append(dp["pos"][1])
        all_labels.append(dp["label"])
    # turn all the data into a TensorDataset to return
    dataset = TensorDataset(torch.tensor(all_input_ids, dtype=torch.long),
                            torch.tensor(all_attention_masks, dtype=torch.long),
                            torch.tensor(all_token_type_ids, dtype=torch.long),
                            torch.tensor(all_start_positions, dtype=torch.long),
                            torch.tensor(all_end_positions, dtype=torch.long),
                            torch.tensor(all_labels, dtype=torch.long))
    return dataset
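
# Usage sketch (assumption): tokenizer_li() reproduces the Li et al. preprocessing with
# bert-base-uncased, so it only needs the raw json split and a padding length.
# li_dataset = tokenizer_li(train, max_length=150)
# li_loader = DataLoader(li_dataset, sampler=RandomSampler(li_dataset), batch_size=32)
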
def split_dataset(train, name_train, name_dev):
"""Split function (optional) to create random stratified sampled dev set from train dataset. Extracts 10% from
train dataset and keeps distribution of metonymy vs literals
params:
train -> json output (shape of li et al datasets): train dataset
name_train -> str: name of the file where you want to save the train set (without .txt)
name_dev -> str: name of the file to save dev set (without .txt)"""
    metonymies = []
    literals = []
    per_of_train = math.ceil(0.1 * len(train))
    for el in train:
        if el["label"] == 0:
            literals.append(el)
        else:
            metonymies.append(el)
    amount_m = int(0.1 * (len(metonymies)))
    amount_l = int(0.1 * (len(literals)))
    print("removing {0} samples from {1} metonymies and {2} samples from {3} literals".format(amount_m, len(metonymies), amount_l, len(literals)))
    dev = []
    for i in range(amount_l + 1):  # +1 because range is exclusive
        selected = random.choice(literals)  # is random.choice() random enough?
        dev.append(selected)  # add to dev set...
        train.remove(selected)  # ...and remove from train
        literals.remove(selected)
    for i in range(amount_m + 1):
        selected = random.choice(metonymies)
        dev.append(selected)
        train.remove(selected)
        metonymies.remove(selected)
    assert len(dev) == per_of_train
    random.shuffle(dev)
    with open(name_train + ".json", "w") as outfile:
        json.dump(train, outfile)
    with open(name_dev + ".json", "w") as outfile:
        json.dump(dev, outfile)
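
# Usage sketch (assumption): carve a stratified ~10% dev split out of a Li et al.-shaped train file
# and write both splits back to disk; the file names are hypothetical.
# with open("train.json") as f:
#     train = json.load(f)
# split_dataset(train, "train_split", "dev_split")  # writes train_split.json and dev_split.json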