import torch
import tqdm
import numpy as np
import evaluate
import json
import random
import math
from tqdm.auto import tqdm
from transformers import BertTokenizer, RobertaTokenizer, BertModel, RobertaModel, RobertaPreTrainedModel, RobertaConfig,  BertConfig, BertPreTrainedModel, PreTrainedModel, AutoModel, AutoTokenizer, AutoConfig
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from transformers import AdamW, get_scheduler
from torch import nn
from torch.nn import CrossEntropyLoss
import matplotlib.pyplot as plt
import os
import pandas as pd
import sklearn

#metric=evaluate.load("accuracy")
torch.cuda.empty_cache()


def reposition(dp, old_dataset=False):
	"""Reposition function to find the character level indices of the metonymy (to map back in tokenizer_new
kulcsar's avatar
kulcsar committed
	function by char_to_tokens)
	params:
		dp -> json readin of li et al shaped dataset(or original dataset)
		old_dataset: bool -> Wheter or not Li et al Datasets are used (True:no, False:yes)
kulcsar's avatar
kulcsar committed

	returns:
		new_start -> int: new start position of metonymy on character level (including whitespaces)
		new_end -> int: new end position of metonymy on character level(including whitespaces)"""
	new_start=0
	new_end=0
	if old_dataset ==False:
		new_dp= " ".join(dp["sentence"]).lower()
		if dp["pos"][0]==0:
			new_start=len(" ".join(dp["sentence"][:dp["pos"][0]]))
		else:
			new_start=len(" ".join(dp["sentence"][:dp["pos"][0]]))+1
		new_end=len(" ".join(dp["sentence"][dp["pos"][0]:dp["pos"][1]]))+new_start


		assert new_dp[new_start:new_end] == " ".join(dp["sentence"][dp["pos"][0]:dp["pos"][1]]).lower()

	elif old_dataset ==True:
		new_dp= " ".join(dp["sentence"][1]).lower()
	
		if dp["pos"][0]==0:
			new_start=len(" ".join(dp["sentence"][1][:dp["pos"][0]]))
		else:
			new_start=len(" ".join(dp["sentence"][1][:dp["pos"][0]]))+1
		new_end=len(" ".join(dp["sentence"][1][dp["pos"][0]:dp["pos"][1]]))+new_start

		assert new_dp[new_start:new_end] == " ".join(dp["sentence"][1][dp["pos"][0]:dp["pos"][1]]).lower()

	return new_start, new_end
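
# Hedged usage sketch (not part of the original pipeline): the toy datapoint below is
# invented to illustrate the character-level offsets reposition() returns for a
# Li et al. shaped entry, where "pos" holds word-level start/end indices.
def _example_reposition():
	dp = {"sentence": ["He", "visited", "New", "York"], "pos": [2, 4], "label": 1}
	start, end = reposition(dp, old_dataset=False)
	# " ".join(dp["sentence"]).lower() == "he visited new york"; characters 11..19 spell "new york"
	assert (start, end) == (11, 19)
	return start, end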


def tokenizer_new(tokenizer, input, max_length, masked=False, old_dataset=False, context=None):
	""" Tokenizing function to tokenize a li et al shaped dataset (list of dictionaries) to create inputs for BERt/RoBERTa
	1. encode the input and find the metonymy tokens (via reposition function)
	2. map the tokens back to strings to verify the correctnes of the token indices
	3. add all attention masks, tokentype ids (only for BERT), input ids, labels, new start positions, new end positions
		to dataset

	params:
		tokenizer -> AutoTokenizer.from_pretrained('tokenizer_name'): BERT/RoBERTa tokenizer
		input -> dataset to tokenize (json read in)
		max_length -> int:max length to pad
		masked -> bool:mask metonymies?
		context -> left right or balaces: add context from left or right? fill up on other side if not enough (left or right)
	"""
	print("swp tokenizer")	
	all_start_positions=[]
	all_end_positions=[]
	all_labels=[]
	all_attention_masks=[]
	all_token_type_ids=[]
	all_input_ids=[]
	for dp in input:

		if masked == True:
			print("masked: ", masked)
			#replace each token of the metonymy span with the mask token (span length preserved)
			dp["sentence"][dp["pos"][0] : dp["pos"][1]] = ["<mask>"] * (dp["pos"][1] - dp["pos"][0])
	
		

		if old_dataset == False:
			#if Li et al dataset: reposition positions on character level, encode sentence and extract target 
			new_start_pos, new_end_pos = reposition(dp, old_dataset=False)
			new_dp= " ".join(dp["sentence"]).lower()
			encoded_inp=tokenizer.encode_plus(new_dp, add_special_tokens=True, max_length=max_length, padding="max_length", truncation=True)
			old_target="".join(dp["sentence"][dp["pos"][0]:dp["pos"][1]]).lower()
		else:
			#If old dataset(original Markert semeval): reposition and add context
			new_start_pos, new_end_pos = reposition(dp, old_dataset=True)
			new_dp= " ".join(dp["sentence"][1]).lower()
			encoded_inp=tokenizer.encode_plus(new_dp, add_special_tokens=True) #dont add max length and padding so we can do it manually
			length_metonymies = len(encoded_inp["input_ids"])
			context_len=max_length - length_metonymies #number of context tokens we can still add; taken from the left and right context
			inp_before=" ".join(dp["sentence"][0]).lower()
			encoded_inp_before=tokenizer.encode_plus(inp_before, add_special_tokens=True) #encode before and after context
			inp_after=" ".join(dp["sentence"][2]).lower()
			encoded_inp_after=tokenizer.encode_plus(inp_after , add_special_tokens=True)
			
			#Preprare input for new dictionary with context
			context_input_ids=[] 
			context_attention_masks=[]
			if tokenizer.name_or_path[0] == "b": #BERT Tokenizer has token type ids too
				context_token_type_ids=[]

			length_before=len(encoded_inp_before["input_ids"])
			length_after=len(encoded_inp_after["input_ids"])
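			#balanced context split: ideally take context_len/2 tokens from each side; if one side is
			#shorter than context_len/2, its unused budget goes to the other side (e.g. context_len=100,
			#length_before=30 -> index_before=30 and up to 70 tokens are taken from the after-context)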

			if length_before>=context_len/2 and length_after>=context_len/2:
				index_before=int(context_len/2)
				index_after=int(context_len/2)

			elif length_before<context_len/2 and length_after>=context_len/2:
				index_before=length_before
				difference_before=(context_len/2)-length_before
				wanted_from_after=(context_len/2)+difference_before
				if wanted_from_after>=length_after:
					index_after=length_after
				else:
					index_after=int(math.ceil(wanted_from_after))

			elif length_after<context_len/2 and length_before>=context_len/2:
				index_after=length_after
				difference_after=(context_len/2)-length_after
				wanted_from_before=(context_len/2)+difference_after

				if wanted_from_before >=length_before:
					index_before=length_before
				else:
					index_before=int(math.ceil(wanted_from_before))
			elif length_before<context_len/2  and length_after<context_len/2:
				index_before=length_before
				index_after=length_after

			context_input_ids=context_input_ids + encoded_inp_before["input_ids"][length_before-index_before:length_before]
			context_input_ids=context_input_ids + encoded_inp["input_ids"]
			context_input_ids=context_input_ids + encoded_inp_after["input_ids"][length_after-index_after:length_after] 
			context_input_ids=context_input_ids+([0]*(max_length-len(context_input_ids))) #pad


			context_attention_masks= context_attention_masks+encoded_inp_before["attention_mask"][length_before-index_before:length_before]
			context_attention_masks=context_attention_masks+encoded_inp["attention_mask"]
			context_attention_masks=context_attention_masks+encoded_inp_after["attention_mask"][length_after-index_after:length_after]
			context_attention_masks=context_attention_masks+([0]*(max_length-len(context_attention_masks))) #pad

			if tokenizer.name_or_path[0] == "b": #BERT Tokenizer has token type ids too
				context_token_type_ids=context_token_type_ids + encoded_inp_before["token_type_ids"][length_before-index_before:length_before]
				context_token_type_ids=context_token_type_ids +encoded_inp["token_type_ids"]
				context_token_type_ids=context_token_type_ids +encoded_inp_after["token_type_ids"][length_after-index_after:length_after]
				context_token_type_ids=context_token_type_ids+([0]*(max_length-len(context_token_type_ids)))
				assert len(context_token_type_ids) == max_length
			
			#make sure we pad to maximum
			assert len(context_input_ids) == max_length and len(context_attention_masks) == max_length

			#get tokenized words for before sentence and the metonymy sentence
			tokenized_before=[]
			for i in range(len(" ".join(dp["sentence"][0]).lower())):
				tokenized_before.append((encoded_inp.char_to_token(i, sequence_index=0)))

			old_target="".join(dp["sentence"][1][dp["pos"][0]:dp["pos"][1]]).lower()

		tokenized_words = []
		for i in range(len(new_dp)): 
			tokenized_words.append((encoded_inp.char_to_token(i, sequence_index=0)))

		span=[]
		for i in tokenized_words[new_start_pos:new_end_pos]:
			if i is not None:
				if old_dataset==True:
					span.append(i+index_before)
				else:
					span.append(i)

		indices_to_tokens=list(set(span))
		indices_to_tokens.sort()

		#decode new positioned tokens to check for false mapping
		if old_dataset==False: 
			if len(indices_to_tokens)==1:
				decoded="".join(tokenizer.decode(encoded_inp["input_ids"][indices_to_tokens[0]])).strip().replace(" ", "")
			else:
				decoded="".join(tokenizer.decode(encoded_inp["input_ids"][indices_to_tokens[0]:indices_to_tokens[-1]+1])).strip().replace(" ", "")
		else:
			if len(indices_to_tokens)==1:
				decoded="".join(tokenizer.decode(context_input_ids[indices_to_tokens[0]])).strip().replace(" ", "")
			else:
				decoded="".join(tokenizer.decode(context_input_ids[indices_to_tokens[0]:indices_to_tokens[-1]+1])).strip().replace(" ", "")
		if old_target!=decoded:
			print("wrong mapping")
			continue

		all_start_positions.append(indices_to_tokens[0])
		all_end_positions.append(indices_to_tokens[-1]+1)
		all_labels.append(dp["label"])
		if old_dataset==False:
			all_input_ids.append(encoded_inp["input_ids"])
			all_attention_masks.append(encoded_inp["attention_mask"])
		else:
			all_input_ids.append(context_input_ids)
			all_attention_masks.append(context_attention_masks)

		if tokenizer.name_or_path[0] == "b":
			if old_dataset==False:
				all_token_type_ids.append(encoded_inp["token_type_ids"]) 
			else:
				all_token_type_ids.append(context_token_type_ids)


		print("roberta tokenizer")
		dataset=TensorDataset(torch.tensor(all_input_ids, dtype=torch.long).to("cuda") , 
							torch.tensor(all_attention_masks, dtype=torch.long).to("cuda") ,
							torch.tensor(all_start_positions,dtype=torch.long).to("cuda"),
							torch.tensor(all_end_positions, dtype=torch.long).to("cuda"),
							torch.tensor(all_labels,dtype=torch.long).to("cuda"))
kulcsar's avatar
kulcsar committed

	if tokenizer.name_or_path[0] =="b":
		print("bert tokenizer")
		dataset=TensorDataset(torch.tensor(all_input_ids, dtype=torch.long).to("cuda"), 
					torch.tensor(all_attention_masks, dtype=torch.long).to("cuda"),
					torch.tensor(all_token_type_ids, dtype=torch.long).to("cuda"),
					torch.tensor(all_start_positions,dtype=torch.long).to("cuda"),
					torch.tensor(all_end_positions, dtype=torch.long).to("cuda"),
					torch.tensor(all_labels,dtype=torch.long).to("cuda"))
kulcsar's avatar
kulcsar committed
	print("created dataset")

	return dataset
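
# Hedged usage sketch for tokenizer_new: assumes a CUDA device is available (the function
# moves every tensor to "cuda") and that "bert-base-uncased" is the intended checkpoint;
# the single toy datapoint is invented for illustration.
def _example_tokenizer_new():
	tok = AutoTokenizer.from_pretrained("bert-base-uncased")
	data = [{"sentence": ["He", "visited", "New", "York"], "pos": [2, 4], "label": 1}]
	dataset = tokenizer_new(tok, data, max_length=64)
	#for BERT the TensorDataset holds: input ids, attention mask, token type ids, start, end, label
	input_ids, attention_mask, token_type_ids, start, end, label = dataset[0]
	return dataset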

def tokenizer_imdb(tokenizer, dataset, max_length):
	"""Tokenizer for imdb dataset (for validation of our tmix implementation.
	
	Params: 
	tokenizer: AutoTokenizer -> Tokenizer (in out case BERT base uncased) 
	dataset: list of dicts   -> dataset (imdb from huggingface) to be preprocessed
	max_length: int 		 -> maximum length for padding/truncation"""
	all_input_ids=[]
	all_attention_masks=[]
	all_token_type_ids=[]
	all_labels=[]

	for dp in dataset:
		encoded_inp=tokenizer.encode_plus(dp["text"], add_special_tokens=True, max_length=max_length, truncation=True, padding="max_length")
		all_labels.append(dp["label"])
		all_input_ids.append(encoded_inp["input_ids"])
		all_attention_masks.append(encoded_inp["attention_mask"])
		all_token_type_ids.append(encoded_inp["token_type_ids"])
	dataset=TensorDataset(torch.tensor(all_input_ids, dtype=torch.long).to("cuda"),
						torch.tensor(all_attention_masks, dtype=torch.long).to("cuda"),
						torch.tensor(all_token_type_ids, dtype=torch.long).to("cuda"),
						torch.tensor(all_labels, dtype=torch.long).to("cuda"))
	print("created imdb dataset")
	return dataset
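
# Hedged usage sketch for tokenizer_imdb: assumes a CUDA device and a BERT tokenizer (the
# function reads "token_type_ids", which RoBERTa tokenizers do not return); the two toy
# reviews are invented for illustration.
def _example_tokenizer_imdb():
	tok = AutoTokenizer.from_pretrained("bert-base-uncased")
	data = [{"text": "A wonderful little film.", "label": 1},
			{"text": "Two hours I will never get back.", "label": 0}]
	return tokenizer_imdb(tok, data, max_length=128)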



class EncodedTokenDataset(torch.utils.data.Dataset):
    """
	Salami Dataset Creator
kulcsar's avatar
kulcsar committed
    A dataset, containing encoded sentences, integer labels and
    the starting and ending position of the target word.
    """

    def __init__(self, encodings, starts, ends, labels, instances):
        self.encodings = encodings
        self.labels = labels
        self.starts = starts
        self.ends = ends
        self.instances = instances

    def __getitem__(self, idx):
        item = {k: torch.tensor(v[idx]) for k, v in self.encodings.items()}
        if self.labels:
            item["labels"] = torch.tensor([self.labels[idx]])
        item["start_position"] = torch.tensor([self.starts[idx]])
        item["end_position"] = torch.tensor([self.ends[idx]])

        return item

    def __len__(self) -> int:
        return len(self.instances)



def salami_tokenizer(tokenizer, input, max_length, masked=False):
	"""Salami tokenizer for input sentences (Used together with EncodedTokenDataset)"""
	print("salami tokenizer")
	bots_token, eots_token = "[bots]", "[eots]"
	tokenizer.add_tokens([bots_token, eots_token])
	bots_id, eots_id = tokenizer.convert_tokens_to_ids(
		[bots_token, eots_token]
	)  # both are of type int
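	#the marker tokens are only used to locate the target span after tokenization: their token
	#positions become start/end below and the markers themselves are popped out again afterwards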



	instances=[]
	all_labels=[]
	all_input_ids=[]
	all_attention_masks=[]
	if tokenizer.name_or_path[0] =="b":
		all_token_type_ids=[]
	for dp in input:
		if masked == True:
			dp["sentence"][dp["pos"][0] : dp["pos"][1]] = ["<mask>"] * (dp["pos"][1] - dp["pos"][0]) #mask the metonymy tokens if wanted
		new_sentence = " ".join(dp["sentence"][:dp["pos"][0]]) + " " + bots_token + " " + " ".join(dp["sentence"][dp["pos"][0]:dp["pos"][1]]) + " " + eots_token + " " + " ".join(dp["sentence"][dp["pos"][1]:])
		#print(new_sentence)
		instances.append(new_sentence)
		all_labels.append(dp["label"])
	#print("number of instances: ", len(instances))
	encoded_inp=tokenizer(instances, padding=True, max_length=max_length, return_tensors="pt")
	start_pos=(encoded_inp["input_ids"] == bots_id).nonzero()[:, 1]
	end_pos=(encoded_inp["input_ids"] == eots_id).nonzero()[:, 1]-1
	#print("start_pos: ", start_pos)
	#print("end_pos: ", end_pos)
	
	#print(all_labels)
	for input_info_name in encoded_inp.keys():
		input_information=[]
		for i, t in enumerate(encoded_inp[input_info_name]):
			t=t.tolist()
			t.pop(start_pos[i].item())
			t.pop(end_pos[i].item())
			input_information.append(t)
		encoded_inp[input_info_name] = torch.tensor(input_information)
	#start_pos=start_pos.tolist()
	#end_pos=end_pos.tolist()
	return EncodedTokenDataset(encoded_inp, start_pos, end_pos, all_labels, instances)
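
# Hedged usage sketch for salami_tokenizer: the target span is wrapped in the added
# [bots]/[eots] marker tokens, whose positions become start/end before the markers are
# removed again; the toy datapoint is invented and a BERT tokenizer is assumed.
def _example_salami_tokenizer():
	tok = AutoTokenizer.from_pretrained("bert-base-uncased")
	data = [{"sentence": ["He", "visited", "New", "York"], "pos": [2, 4], "label": 1}]
	encoded = salami_tokenizer(tok, data, max_length=64)
	item = encoded[0]  #dict with input_ids, attention_mask, (token_type_ids,) labels, start_position, end_position
	return item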


def tokenizer_li(input, max_length, masked=False):
	"""Tokenizer following the Li et al. preprocessing: encodes each sentence with the BERT tokenizer,
	verifies that the metonymy span can be recovered from the subword tokens, pads manually to max_length
	and returns a TensorDataset of input ids, attention masks, token type ids, positions and labels."""
	tokenizer=BertTokenizer.from_pretrained("bert-base-uncased")
	all_input_ids=[]
	all_attention_masks=[]
	all_token_type_ids=[]
	all_start_positions=[]
	all_end_positions=[]
	all_labels=[]
	
	for dp in input:
		if masked == True:
			dp["sentence"][dp["pos"][0] : dp["pos"][1]] = ["<mask>"] * (dp["pos"][1] - dp["pos"][0]) #mask the metonymy tokens if wanted
		new_dp = " ".join(dp["sentence"]).lower()
		encoded_inp=tokenizer.encode_plus(new_dp, add_special_tokens=True, max_length=max_length, padding=True) #encode Input with BertTokenizer
		tf_tokens = tokenizer.convert_ids_to_tokens(encoded_inp["input_ids"]) #

		orig_to_tok_index2=[]
		all_tokens2 = ['[CLS]'] 
		for (i, token) in enumerate(dp["sentence"]):
			orig_to_tok_index2.append(len(all_tokens2))
			sub_tokens = tokenizer.tokenize(token)
			for sub_token in sub_tokens:
				all_tokens2.append(sub_token)
		orig_to_tok_index2.append(len(all_tokens2))
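		#orig_to_tok_index2[i] is the subword index (in tf_tokens) at which word i starts;
		#it is only used below to rebuild the target span and verify the position mapping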


		if len(tf_tokens)>max_length:
			print("too long")
			continue

		old_target="".join(dp["sentence"][dp["pos"][0]:dp["pos"][1]]).lower()
		#print("old taget: ", old_target)
		new_target="".join(tf_tokens[orig_to_tok_index2[dp["pos"][0]]:orig_to_tok_index2[dp["pos"][1]]]).replace("##", "").lower()
		#print("new old_target: ", new_target)

		if old_target != new_target:
			print("wrong mapping") #check right mapping of positions
			print(old_target)
			print(new_target)
			
			continue
		assert len(encoded_inp["input_ids"]) == len(encoded_inp["attention_mask"]) #default wise its 1s (in li et al implementation)

		#pad the attention masks, input ids, and token type ids with padding_length:
		padding_length=max_length-len(encoded_inp["input_ids"])

		input_ids = encoded_inp["input_ids"]+([0]*padding_length)
		attention_mask = encoded_inp["attention_mask"] + ([0] * padding_length)
		token_type_ids = encoded_inp["token_type_ids"] + ([0] * padding_length)

		#add the infos to dict
		all_input_ids.append(input_ids)
		all_attention_masks.append(attention_mask)
		all_token_type_ids.append(token_type_ids)
		all_start_positions.append(dp["pos"][0]) #do we not have to update the positions?
		all_end_positions.append(dp["pos"][1])
		all_labels.append(dp["label"])

	#turn all the data into a dataset to return 
	dataset=TensorDataset(torch.tensor(all_input_ids, dtype=torch.long), 
						torch.tensor(all_attention_masks, dtype=torch.long),
						torch.tensor(all_token_type_ids, dtype=torch.long),
						torch.tensor(all_start_positions,dtype=torch.long),
						torch.tensor(all_end_positions, dtype=torch.long),
						torch.tensor(all_labels,dtype=torch.long))
	
	return dataset #encoded_inp, tf_tokens, old_target, new_target #input_ids, attention_mask #, sub_tokens #test , orig_to_tok_index,
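
# Hedged usage sketch for tokenizer_li: the function loads bert-base-uncased itself, so only
# the Li et al. shaped list and a maximum length are needed; the toy datapoint is invented
# for illustration and the returned tensors stay on the CPU here.
def _example_tokenizer_li():
	data = [{"sentence": ["He", "visited", "New", "York"], "pos": [2, 4], "label": 1}]
	return tokenizer_li(data, max_length=64)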



def split_dataset(train, name_train, name_dev):
	"""Split function (optional) to create random stratified sampled dev set from train dataset. Extracts 10% from
	train dataset and keeps distribution of metonymy vs literals

	params:
		train -> json output (shape of li et al datasets): train dataset 
		name_train -> str: name of the file where you want to save the train set (without .txt)
		name_dev -> str: name of the file to save dev set (without .txt)"""
	metonymies=[]
	literals=[]
	per_of_train=math.ceil(0.1*len(train))

	for el in train:
		if el["label"] == 0:
			literals.append(el)
		else:
			metonymies.append(el)
	
	amount_m = int(0.1*(len(metonymies)))
	amount_l = int(0.1*(len(literals)))
	print("removing {0} samples from {1} metonymies and {2} samples from {3} literals".format(amount_m, len(metonymies), amount_l, len(literals)))
	dev = []
	for i in range(amount_l+1): #+1 because range is exclusive
		selected = random.choice(literals) # is random.choice() random enough?
		dev.append(selected) #add to dev set...
		train.remove(selected) #and remove from train
		literals.remove(selected)
	
	for i in range(amount_m+1):
		selected=random.choice(metonymies)
		dev.append(selected)
		train.remove(selected)
		metonymies.remove(selected)

	assert len(dev) == per_of_train


	random.shuffle(dev)
	
	with open(name_train+".json", "w") as outfile:
		json.dump(train, outfile)

	with open(name_dev+".json", "w") as outfile:
		json.dump(dev, outfile)

	return train, dev
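
# Hedged usage sketch for split_dataset: assumes a Li et al. shaped train file such as
# "train.json" exists next to this script; the filenames below are placeholders, not part
# of the original pipeline.
def _example_split_dataset():
	with open("train.json", "r") as infile:
		train = json.load(infile)
	#writes train_split.json and dev_split.json and returns both lists
	return split_dataset(train, "train_split", "dev_split")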