From 2135dd2cec52ac80bad53e76277f4a4ee8b488e1 Mon Sep 17 00:00:00 2001
From: mai <mai@cl.uni-heidelberg.de>
Date: Sun, 5 Mar 2023 18:46:17 +0100
Subject: [PATCH] Add test script

Test script returns prediction probabilities. Takes a hard-coded sentence

---
 .gitignore                                 |  1 +
 .gitmodules                                |  3 +++
 README.md                                  |  7 +++++++
 bert-base-uncased-hatexplain-rationale-two |  1 +
 hatexplain                                 |  1 +
 test.py                                    | 18 ++++++++++++++++++
 6 files changed, 31 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 .gitmodules
 create mode 160000 bert-base-uncased-hatexplain-rationale-two
 create mode 120000 hatexplain
 create mode 100644 test.py

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..5ceb386
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+venv
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..25a1cbe
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "bert-base-uncased-hatexplain-rationale-two"]
+	path = bert-base-uncased-hatexplain-rationale-two
+	url = https://huggingface.co/Hate-speech-CNERG/bert-base-uncased-hatexplain-rationale-two
diff --git a/README.md b/README.md
index 0f56f18..91d3adb 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,12 @@
 # adversarial-hatespeech
 
+## Installation
+```bash
+$ pip install transformers
+$ pip install --no-cache-dir torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116
+$ git clone https://huggingface.co/Hate-speech-CNERG/bert-base-uncased-hatexplain-rationale-two
+```
+
 ## Getting started
 
 
diff --git a/bert-base-uncased-hatexplain-rationale-two b/bert-base-uncased-hatexplain-rationale-two
new file mode 160000
index 0000000..7b1a724
--- /dev/null
+++ b/bert-base-uncased-hatexplain-rationale-two
@@ -0,0 +1 @@
+Subproject commit 7b1a724a178c639a4b3446c0ff8f13d19be4f471
diff --git a/hatexplain b/hatexplain
new file mode 120000
index 0000000..022cf0d
--- /dev/null
+++ b/hatexplain
@@ -0,0 +1 @@
+bert-base-uncased-hatexplain-rationale-two
\ No newline at end of file
diff --git a/test.py b/test.py
new file mode 100644
index 0000000..8a2ab3c
--- /dev/null
+++ b/test.py
@@ -0,0 +1,18 @@
+import torch
+from transformers import AutoTokenizer, AutoModelForSequenceClassification
+### from models.py
+from hatexplain.models import *
+
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+tokenizer = AutoTokenizer.from_pretrained("Hate-speech-CNERG/bert-base-uncased-hatexplain-rationale-two")
+model = \
+    Model_Rational_Label.from_pretrained(
+        "Hate-speech-CNERG/bert-base-uncased-hatexplain-rationale-two"
+    )
+model = model.to(device)
+inputs = tokenizer('He is a great guy', return_tensors="pt").to(device)
+prediction_logits, _ = model(input_ids=inputs['input_ids'],attention_mask=inputs['attention_mask'])
+softmax = torch.nn.Softmax(dim=1)
+probs = softmax(prediction_logits)
+print(f"Normal: {probs[0][0]}\nHatespeech: {probs[0][1]}")
-- 
GitLab
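
For reference, below is a minimal sketch of how the inference steps from the committed `test.py` could be wrapped into a reusable function that accepts an arbitrary sentence instead of the hard-coded one. The `classify` helper and the `__main__` guard are not part of the commit; the label order (index 0 = Normal, index 1 = Hatespeech) is taken from the print statement in `test.py`, and all model/tokenizer calls mirror the script above.

```python
# Sketch only: same tokenizer/model calls as in the committed test.py,
# wrapped so that any sentence can be classified. `classify` is a
# hypothetical helper, not part of the patch.
import torch
from transformers import AutoTokenizer
from hatexplain.models import *  # provides Model_Rational_Label (models.py in the submodule)

device = 'cuda' if torch.cuda.is_available() else 'cpu'

tokenizer = AutoTokenizer.from_pretrained(
    "Hate-speech-CNERG/bert-base-uncased-hatexplain-rationale-two")
model = Model_Rational_Label.from_pretrained(
    "Hate-speech-CNERG/bert-base-uncased-hatexplain-rationale-two").to(device)


def classify(sentence: str) -> dict:
    """Return class probabilities for a single sentence."""
    inputs = tokenizer(sentence, return_tensors="pt").to(device)
    with torch.no_grad():  # inference only, no gradients needed
        logits, _ = model(input_ids=inputs['input_ids'],
                          attention_mask=inputs['attention_mask'])
    probs = torch.softmax(logits, dim=1)[0]
    # Label order follows the print statement in test.py: [Normal, Hatespeech]
    return {"Normal": probs[0].item(), "Hatespeech": probs[1].item()}


if __name__ == "__main__":
    print(classify("He is a great guy"))
```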