import json
import math
import os
import random

import evaluate
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm.auto import tqdm
from transformers import (AdamW, AutoConfig, AutoModel, AutoTokenizer, BertConfig,
                          BertModel, BertPreTrainedModel, BertTokenizer, PreTrainedModel,
                          RobertaConfig, RobertaModel, RobertaPreTrainedModel,
                          RobertaTokenizer, get_scheduler)

# metric = evaluate.load("accuracy")
torch.cuda.empty_cache()


def reposition(dp, old_dataset=False):
    """Reposition function to find the character-level indices of the metonymy
    (mapped back to tokens in tokenizer_new via char_to_token).

    params:
        dp -> dict: one datapoint of a li et al. shaped dataset (json read-in)
        old_dataset -> bool: True if dp["sentence"] is a [before, sentence, after] triple
    returns:
        new_start -> int: start position of the metonymy on character level (including whitespace)
        new_end -> int: end position of the metonymy on character level (including whitespace)
    """
    new_start = 0
    new_end = 0
    if not old_dataset:
        new_dp = " ".join(dp["sentence"]).lower()
        if dp["pos"][0] == 0:
            new_start = len(" ".join(dp["sentence"][:dp["pos"][0]]))
        else:
            new_start = len(" ".join(dp["sentence"][:dp["pos"][0]])) + 1
        new_end = len(" ".join(dp["sentence"][dp["pos"][0]:dp["pos"][1]])) + new_start
        assert new_dp[new_start:new_end] == " ".join(dp["sentence"][dp["pos"][0]:dp["pos"][1]]).lower()
    else:
        new_dp = " ".join(dp["sentence"][1]).lower()
        if dp["pos"][0] == 0:
            new_start = len(" ".join(dp["sentence"][1][:dp["pos"][0]]))
        else:
            new_start = len(" ".join(dp["sentence"][1][:dp["pos"][0]])) + 1
        new_end = len(" ".join(dp["sentence"][1][dp["pos"][0]:dp["pos"][1]])) + new_start
        assert new_dp[new_start:new_end] == " ".join(dp["sentence"][1][dp["pos"][0]:dp["pos"][1]]).lower()
    return new_start, new_end
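
# Illustrative sketch (not part of the original pipeline): how reposition() is meant to
# be called on a li et al. shaped datapoint. The datapoint below is made up purely for
# illustration; only the "sentence" and "pos" keys matter for reposition().
def _reposition_example():
    dp = {"sentence": ["england", "won", "the", "world", "cup"],
          "pos": [0, 1],   # token span of the (potential) metonymy
          "label": 1}
    start, end = reposition(dp, old_dataset=False)
    # start/end are character offsets into " ".join(dp["sentence"]).lower()
    assert " ".join(dp["sentence"]).lower()[start:end] == "england"
    return start, end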

def tokenizer_new(tokenizer, input, max_length, masked=False, old_dataset=False, context=None):
    """Tokenize a li et al. shaped dataset (list of dictionaries) to create inputs for BERT/RoBERTa.

    1. encode the input and find the metonymy tokens (via the reposition function)
    2. map the tokens back to strings to verify the correctness of the token indices
    3. collect attention masks, token type ids (only for BERT), input ids, labels,
       new start positions and new end positions into a dataset

    params:
        tokenizer -> AutoTokenizer.from_pretrained('tokenizer_name'): BERT/RoBERTa tokenizer
        input -> list[dict]: dataset to tokenize (json read-in)
        max_length -> int: max length to pad to
        masked -> bool: replace the metonymy tokens with the mask token?
        old_dataset -> bool: True if each sentence is a [before, sentence, after] triple
        context -> 'left', 'right' or 'balanced': add context from the left or right and
                   fill up on the other side if one side is too short (currently unused)
    """
    print("swp tokenizer")
    all_start_positions = []
    all_end_positions = []
    all_labels = []
    all_attention_masks = []
    all_token_type_ids = []
    all_input_ids = []
    for dp in input:
        if masked:
            # replace the target tokens with the tokenizer's mask token
            dp["sentence"][dp["pos"][0]:dp["pos"][1]] = [tokenizer.mask_token] * (dp["pos"][1] - dp["pos"][0])
        if not old_dataset:
            new_start_pos, new_end_pos = reposition(dp, old_dataset=False)
            new_dp = " ".join(dp["sentence"]).lower()
            encoded_inp = tokenizer.encode_plus(new_dp, add_special_tokens=True, max_length=max_length,
                                                padding="max_length", truncation=True)
            old_target = "".join(dp["sentence"][dp["pos"][0]:dp["pos"][1]]).lower()
        else:
            print("new dataset")
            new_start_pos, new_end_pos = reposition(dp, old_dataset=True)
            new_dp = " ".join(dp["sentence"][1]).lower()
            # don't add max length and padding here so we can do it manually below
            encoded_inp = tokenizer.encode_plus(new_dp, add_special_tokens=True)
            length_metonymies = len(encoded_inp["input_ids"])
            # number of context tokens we can still add around the metonymy sentence
            context_len = max_length - length_metonymies
            # encode the preceding and following context
            inp_before = " ".join(dp["sentence"][0]).lower()
            encoded_inp_before = tokenizer.encode_plus(inp_before, add_special_tokens=True)
            inp_after = " ".join(dp["sentence"][2]).lower()
            encoded_inp_after = tokenizer.encode_plus(inp_after, add_special_tokens=True)
            # prepare the new input lists with context
            context_input_ids = []
            context_attention_masks = []
            if tokenizer.name_or_path[0] == "b":  # the BERT tokenizer has token type ids too
                context_token_type_ids = []
            length_before = len(encoded_inp_before["input_ids"])
            length_after = len(encoded_inp_after["input_ids"])
            if length_before >= context_len / 2 and length_after >= context_len / 2:
                index_before = int(context_len / 2)
                index_after = int(context_len / 2)
            elif length_before < context_len / 2 and length_after >= context_len / 2:
                index_before = length_before
                difference_before = (context_len / 2) - length_before
                wanted_from_after = (context_len / 2) + difference_before
                if wanted_from_after >= length_after:
                    index_after = length_after
                else:
                    index_after = int(math.ceil(wanted_from_after))
            elif length_after < context_len / 2 and length_before >= context_len / 2:
                index_after = length_after
                difference_after = (context_len / 2) - length_after
                wanted_from_before = (context_len / 2) + difference_after
                if wanted_from_before >= length_before:
                    index_before = length_before
                else:
                    index_before = int(math.ceil(wanted_from_before))
            elif length_before < context_len / 2 and length_after < context_len / 2:
                index_before = length_before
                index_after = length_after
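            # index_before / index_after: number of tokens to take from the preceding and
            # following context; if one side is shorter than half of context_len, the
            # unused budget is handed over to the other side.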
            # use the calculated indices to append the right tokens and pad to 512 if needed:
            # the last index_before tokens of the preceding context, the full metonymy sentence,
            # then the last index_after tokens of the following context
            context_input_ids = context_input_ids + encoded_inp_before["input_ids"][length_before - index_before:length_before]
            context_input_ids = context_input_ids + encoded_inp["input_ids"]
            context_input_ids = context_input_ids + encoded_inp_after["input_ids"][length_after - index_after:length_after]
            context_input_ids = context_input_ids + ([0] * (512 - len(context_input_ids)))  # pad (assumes max_length == 512)
            context_attention_masks = context_attention_masks + encoded_inp_before["attention_mask"][length_before - index_before:length_before]
            context_attention_masks = context_attention_masks + encoded_inp["attention_mask"]
            context_attention_masks = context_attention_masks + encoded_inp_after["attention_mask"][length_after - index_after:length_after]
            context_attention_masks = context_attention_masks + ([0] * (512 - len(context_attention_masks)))  # pad
            if tokenizer.name_or_path[0] == "b":  # the BERT tokenizer has token type ids too
                context_token_type_ids = context_token_type_ids + encoded_inp_before["token_type_ids"][length_before - index_before:length_before]
                context_token_type_ids = context_token_type_ids + encoded_inp["token_type_ids"]
                context_token_type_ids = context_token_type_ids + encoded_inp_after["token_type_ids"][length_after - index_after:length_after]
                context_token_type_ids = context_token_type_ids + ([0] * (512 - len(context_token_type_ids)))
                assert len(context_token_type_ids) == 512
            assert len(context_input_ids) == 512 and len(context_attention_masks) == 512
            # character-to-token indices for the preceding context (currently unused)
            tokenized_before = []
            for i in range(len(" ".join(dp["sentence"][0]).lower())):
                tokenized_before.append(encoded_inp.char_to_token(i, sequence_index=0))
            old_target = "".join(dp["sentence"][1][dp["pos"][0]:dp["pos"][1]]).lower()
            # no separate encoded_inp dictionary is built here; the context_* lists are used directly
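        # Map every character of the metonymy sentence to its token index, collect the token
        # indices covering the target span, and decode them again to verify that the
        # token-level span really corresponds to the original target string.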
        tokenized_words = []
        for i in range(len(new_dp)):
            # note: with the RoBERTa tokenizer, spaces are attached to the following word
            # and are therefore always mapped to None
            tokenized_words.append(encoded_inp.char_to_token(i, sequence_index=0))
        span = []
        for i in tokenized_words[new_start_pos:new_end_pos]:
            if i is not None:
                if old_dataset:
                    # shift by the number of tokens taken from the preceding context
                    span.append(i + index_before)
                else:
                    span.append(i)
        indices_to_tokens = list(set(span))
        indices_to_tokens.sort()
        if not indices_to_tokens:
            # the target span was lost (e.g. truncated away) -> skip this datapoint
            print("wrong mapping")
            print(dp)
            continue
        if not old_dataset:
            if len(indices_to_tokens) == 1:
                decoded = "".join(tokenizer.decode(encoded_inp["input_ids"][indices_to_tokens[0]])).strip().replace(" ", "")
            else:
                decoded = "".join(tokenizer.decode(encoded_inp["input_ids"][indices_to_tokens[0]:indices_to_tokens[-1] + 1])).strip().replace(" ", "")
        else:
            if len(indices_to_tokens) == 1:
                decoded = "".join(tokenizer.decode(context_input_ids[indices_to_tokens[0]])).strip().replace(" ", "")
            else:
                decoded = "".join(tokenizer.decode(context_input_ids[indices_to_tokens[0]:indices_to_tokens[-1] + 1])).strip().replace(" ", "")
        if old_target != decoded:
            # mapping could not be verified -> report and skip this datapoint
            print("wrong mapping")
            if old_dataset:
                print("new_start_pos: ", new_start_pos)
                print("length of before: ", len(encoded_inp_before["input_ids"]))
                print("length of after: ", len(encoded_inp_after["input_ids"]))
                print("after input ids: ", encoded_inp_after["input_ids"])
                print("used from before: ", index_before)
                print("used from after: ", index_after)
                print("metonymy sentence length: ", len(encoded_inp["input_ids"]))
                print("left for filling: ", context_len)
                print("indices to tokens: ", indices_to_tokens)
            print("decoded: ", decoded)
            print("old target: ", old_target)
            print(dp)
            continue
        all_start_positions.append(indices_to_tokens[0])
        all_end_positions.append(indices_to_tokens[-1] + 1)
        all_labels.append(dp["label"])
        if not old_dataset:
            all_input_ids.append(encoded_inp["input_ids"])
            all_attention_masks.append(encoded_inp["attention_mask"])
        else:
            all_input_ids.append(context_input_ids)
            all_attention_masks.append(context_attention_masks)
        if tokenizer.name_or_path[0] == "b":
            if not old_dataset:
                all_token_type_ids.append(encoded_inp["token_type_ids"])
            else:
                all_token_type_ids.append(context_token_type_ids)
    if tokenizer.name_or_path[0] == "r":  # the RoBERTa tokenizer has no token type ids
        print("roberta tokenizer")
        dataset = TensorDataset(torch.tensor(all_input_ids, dtype=torch.long).to("cuda"),
                                torch.tensor(all_attention_masks, dtype=torch.long).to("cuda"),
                                torch.tensor(all_start_positions, dtype=torch.long).to("cuda"),
                                torch.tensor(all_end_positions, dtype=torch.long).to("cuda"),
                                torch.tensor(all_labels, dtype=torch.long).to("cuda"))
    if tokenizer.name_or_path[0] == "b":
        print("bert tokenizer")
        dataset = TensorDataset(torch.tensor(all_input_ids, dtype=torch.long).to("cuda"),
                                torch.tensor(all_attention_masks, dtype=torch.long).to("cuda"),
                                torch.tensor(all_token_type_ids, dtype=torch.long).to("cuda"),
                                torch.tensor(all_start_positions, dtype=torch.long).to("cuda"),
                                torch.tensor(all_end_positions, dtype=torch.long).to("cuda"),
                                torch.tensor(all_labels, dtype=torch.long).to("cuda"))
    print("created dataset")
    return dataset


class EncodedTokenDataset(torch.utils.data.Dataset):
    """A dataset containing encoded sentences, integer labels and the starting and
    ending position of the target word."""

    def __init__(self, encodings, starts, ends, labels, instances):
        self.encodings = encodings
        self.labels = labels
        self.starts = starts
        self.ends = ends
        self.instances = instances

    def __getitem__(self, idx):
        item = {k: torch.tensor(v[idx]) for k, v in self.encodings.items()}
        if self.labels:
            item["labels"] = torch.tensor([self.labels[idx]])
        item["start_position"] = torch.tensor([self.starts[idx]])
        item["end_position"] = torch.tensor([self.ends[idx]])
        return item

    def __len__(self) -> int:
        return len(self.instances)
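
# Illustrative sketch of how tokenizer_new() would typically be driven (assumptions:
# a CUDA device is available, since tokenizer_new moves all tensors to "cuda", and
# "train.json" is a hypothetical file in the li et al. format).
def _tokenizer_new_example():
    tokenizer = AutoTokenizer.from_pretrained("roberta-base")
    with open("train.json") as f:  # hypothetical dataset file
        data = json.load(f)
    dataset = tokenizer_new(tokenizer, data, max_length=512, masked=False, old_dataset=False)
    loader = DataLoader(dataset, sampler=RandomSampler(dataset), batch_size=16)
    # each batch: input_ids, attention_mask, start_pos, end_pos, labels (RoBERTa case)
    return loader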

def salami_tokenizer(tokenizer, input, max_length, masked=False):
    """Tokenize a li et al. shaped dataset by wrapping the target span in the marker
    tokens [bots]/[eots], recording their positions and removing them again."""
    print("salami tokenizer")
    bots_token, eots_token = "[bots]", "[eots]"
    tokenizer.add_tokens([bots_token, eots_token])
    bots_id, eots_id = tokenizer.convert_tokens_to_ids([bots_token, eots_token])  # both are of type int
    instances = []
    all_labels = []
    for dp in input:
        if masked:
            # replace the target tokens with the tokenizer's mask token
            dp["sentence"][dp["pos"][0]:dp["pos"][1]] = [tokenizer.mask_token] * (dp["pos"][1] - dp["pos"][0])
        new_sentence = (" ".join(dp["sentence"][:dp["pos"][0]]) + " " + bots_token + " "
                        + " ".join(dp["sentence"][dp["pos"][0]:dp["pos"][1]]) + " " + eots_token + " "
                        + " ".join(dp["sentence"][dp["pos"][1]:]))
        instances.append(new_sentence)
        all_labels.append(dp["label"])
    encoded_inp = tokenizer(instances, padding=True, max_length=max_length, return_tensors="pt")
    start_pos = (encoded_inp["input_ids"] == bots_id).nonzero()[:, 1]
    end_pos = (encoded_inp["input_ids"] == eots_id).nonzero()[:, 1] - 1
    # remove the marker tokens again; after popping [bots], the [eots] token has shifted
    # one position to the left, which end_pos (eots index - 1) already accounts for
    for input_info_name in encoded_inp.keys():
        input_information = []
        for i, t in enumerate(encoded_inp[input_info_name]):
            t = t.tolist()
            t.pop(start_pos[i].item())
            t.pop(end_pos[i].item())
            input_information.append(t)
        encoded_inp[input_info_name] = torch.tensor(input_information)
    return EncodedTokenDataset(encoded_inp, start_pos, end_pos, all_labels, instances)


def tokenizer_li(input, max_length, masked=False):
    """Tokenize a li et al. shaped dataset with the original li et al. approach:
    map word positions to sub-token positions via an orig-to-token index."""
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    all_input_ids = []
    all_attention_masks = []
    all_token_type_ids = []
    all_start_positions = []
    all_end_positions = []
    all_labels = []
    for dp in input:
        if masked:
            # replace the target tokens with the tokenizer's mask token
            dp["sentence"][dp["pos"][0]:dp["pos"][1]] = [tokenizer.mask_token] * (dp["pos"][1] - dp["pos"][0])
        new_dp = " ".join(dp["sentence"]).lower()
        # encode the input with the BertTokenizer
        encoded_inp = tokenizer.encode_plus(new_dp, add_special_tokens=True, max_length=max_length, padding=True)
        tf_tokens = tokenizer.convert_ids_to_tokens(encoded_inp["input_ids"])
        orig_to_tok_index2 = []
        all_tokens2 = ['[CLS]']
        for (i, token) in enumerate(dp["sentence"]):
            orig_to_tok_index2.append(len(all_tokens2))
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                all_tokens2.append(sub_token)
        orig_to_tok_index2.append(len(all_tokens2))
        if len(tf_tokens) > max_length:
            print("too long")
            continue
        old_target = "".join(dp["sentence"][dp["pos"][0]:dp["pos"][1]]).lower()
        new_target = "".join(tf_tokens[orig_to_tok_index2[dp["pos"][0]]:orig_to_tok_index2[dp["pos"][1]]]).replace("##", "").lower()
        if old_target != new_target:
            # check the correct mapping of the positions
            print("wrong mapping")
            print(old_target)
            print(new_target)
            continue
        # the attention mask defaults to all 1s (as in the li et al. implementation)
        assert len(encoded_inp["input_ids"]) == len(encoded_inp["attention_mask"])
        # pad the input ids, attention masks and token type ids to max_length
        padding_length = max_length - len(encoded_inp["input_ids"])
        input_ids = encoded_inp["input_ids"] + ([0] * padding_length)
        attention_mask = encoded_inp["attention_mask"] + ([0] * padding_length)
        token_type_ids = encoded_inp["token_type_ids"] + ([0] * padding_length)
        all_input_ids.append(input_ids)
        all_attention_masks.append(attention_mask)
        all_token_type_ids.append(token_type_ids)
        all_start_positions.append(dp["pos"][0])  # do we not have to update the positions?
        all_end_positions.append(dp["pos"][1])
        all_labels.append(dp["label"])
    # turn the collected lists into a TensorDataset
    dataset = TensorDataset(torch.tensor(all_input_ids, dtype=torch.long),
                            torch.tensor(all_attention_masks, dtype=torch.long),
                            torch.tensor(all_token_type_ids, dtype=torch.long),
                            torch.tensor(all_start_positions, dtype=torch.long),
                            torch.tensor(all_end_positions, dtype=torch.long),
                            torch.tensor(all_labels, dtype=torch.long))
    return dataset
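
# Illustrative sketch of the marker-token variant: salami_tokenizer() wraps the target in
# [bots]/[eots], records their positions, removes them again and returns an
# EncodedTokenDataset whose items can be fed to a DataLoader directly. "train.json" is
# again a hypothetical file name.
def _salami_tokenizer_example():
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    with open("train.json") as f:  # hypothetical dataset file
        data = json.load(f)
    dataset = salami_tokenizer(tokenizer, data, max_length=512)
    loader = DataLoader(dataset, batch_size=16, shuffle=True)
    return loader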

def split_dataset(train, name_train, name_dev):
    """Optional split function to create a randomly stratified dev set from the train dataset.

    Extracts roughly 10% of the train dataset and keeps the distribution of metonymic
    vs. literal examples.

    params:
        train -> list[dict]: train dataset (shape of the li et al. datasets, json read-in)
        name_train -> str: file name to save the reduced train set to (without extension, saved as .json)
        name_dev -> str: file name to save the dev set to (without extension, saved as .json)
    """
    metonymies = []
    literals = []
    per_of_train = math.ceil(0.1 * len(train))
    for el in train:
        if el["label"] == 0:
            literals.append(el)
        else:
            metonymies.append(el)
    amount_m = int(0.1 * (len(metonymies)))
    amount_l = int(0.1 * (len(literals)))
    print("removing {0} samples from {1} metonymies and {2} samples from {3} literals".format(
        amount_m, len(metonymies), amount_l, len(literals)))
    dev = []
    for i in range(amount_l + 1):  # +1 to compensate for int() rounding down
        selected = random.choice(literals)  # is random.choice() random enough?
        dev.append(selected)     # add to dev set ...
        train.remove(selected)   # ... and remove from train
        literals.remove(selected)
    for i in range(amount_m + 1):
        selected = random.choice(metonymies)
        dev.append(selected)
        train.remove(selected)
        metonymies.remove(selected)
    assert len(dev) == per_of_train
    random.shuffle(dev)
    with open(name_train + ".json", "w") as outfile:
        json.dump(train, outfile)
    with open(name_dev + ".json", "w") as outfile:
        json.dump(dev, outfile)
    return train, dev
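
# Illustrative end-to-end sketch (assumptions: "train_full.json" is a hypothetical
# li et al. style training file, the "train_split"/"dev_split" output names are equally
# made up, and a CUDA device is available because tokenizer_new moves tensors to "cuda").
# It shows how split_dataset() is meant to be combined with one of the tokenizers above.
if __name__ == "__main__":
    with open("train_full.json") as f:  # hypothetical input file
        full_train = json.load(f)
    train, dev = split_dataset(full_train, "train_split", "dev_split")
    tokenizer = AutoTokenizer.from_pretrained("roberta-base")
    train_dataset = tokenizer_new(tokenizer, train, max_length=512)
    dev_dataset = tokenizer_new(tokenizer, dev, max_length=512)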