Commit a06a399a authored by wesenberg

Merge remote-tracking branch 'origin/master'

parents 793d73bf 37eb667b
@@ -123,4 +123,4 @@ def load_amr(fpath):
 if __name__ == "__main__":
-    load_amr(os.path.join("C:\\Users\\Wesenberg\\PycharmProjects\\rl-sentence-compression-wesenberg\\AMR-Visualization-master\\AMR-Visualization-master\\source.txt"))
+    load_amr('test_single.txt')
(o / open-01
:ARG0 (p / person
:name (n / name
:op1 "Fernando"
:op2 "Henrique"
:op3 "Cardoso")
:ARG0-of (s / scandalize-01
:ARG2 (p2 / peddle-01
:ARG1 (ii / influence-01))
:ARG1-of (m / mushroom-02)))
:ARG1 (p3 / probe-01))
\ No newline at end of file
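The files above and below this point are plain PENMAN notation, the standard serialization for AMR graphs. As a minimal sketch (not part of this commit), such graphs can be parsed with the third-party penman package; reading test_single.txt assumes it holds the graph above, as the load_amr('test_single.txt') call suggests.

# Minimal sketch, not repository code: parse PENMAN-serialized AMR graphs
# with the third-party `penman` package (pip install penman).
import penman

graph = penman.decode("(d / describe-01 :ARG0 (h / he) :ARG1 (s / she) :ARG2 (g / genius))")
print(graph.top)      # 'd'
print(graph.triples)  # [('d', ':instance', 'describe-01'), ('d', ':ARG0', 'h'), ...]

# penman.load reads every graph in a file; the filename assumes the
# test file referenced by the load_amr call in this commit.
graphs = penman.load("test_single.txt")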
(d / describe-01
:ARG0 (h / he)
:ARG1 (s / she)
:ARG2 (g / genius))
\ No newline at end of file
# ::snt export of high-tech products has frequently been in the spotlight , making a significant contribution to the growth of foreign trade in guangdong .
# ::tok export of high - tech products has frequently been in the spotlight , making a significant contribution to the growth of foreign trade in guangdong .
# ::alignments 24-25|0 22-23|0.0 21-22|0.0.1.0.1.0 19-20|0.0.1 16-17|0.0.1.0 15-16|0.0.1.0.1 11-12|0.0.0.0 7-8|0.0.0 5-6|0.0.1.0.0.0 4-5|0.0.1.0.0.0.0 2-3|0.0.1.0.0.0.0.0 0-1|0.0.1.0.0 ::annotator JAMR dev v0.3 ::date 2019-07-08T23:42:17.243
(g / guangdong
:ARG2-of (t / trade-01
:ARG1 (f2 / frequent-02
:ARG1 (s2 / spotlight))
:ARG1-of (g2 / grow-01
:ARG2-of (c / contribute-01
:ARG1 (e / export-01
:ARG1 (p / product
:mod (t2 / tech
:ARG1-of (h / high-02))))
:ARG1-of (s / significant-02
:compared-to (f / foreign))))))
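The ::alignments field above uses JAMR's span|address format: each item maps a token span [start, end) from the ::tok line to a node address in the graph, where 0 is the root and 0.0 is its first child. A minimal decoding sketch (illustrative, not repository code):

# Decode a JAMR ::alignments field into (token_span, node_address) pairs.
line = "24-25|0 22-23|0.0 21-22|0.0.1.0.1.0 19-20|0.0.1"  # excerpt from above

alignments = []
for item in line.split():
    span, address = item.split("|")
    start, end = (int(x) for x in span.split("-"))
    alignments.append(((start, end), address))

# ((24, 25), '0'): token 24, "guangdong", aligns to the root (g / guangdong).
print(alignments[0])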
(d / describe-01
:ARG0 (h / he)
:ARG1 (s / she)
:ARG2 (g / genius))
\ No newline at end of file
gold :op1 interrogative
know :arg0 you :arg1 it :beneficiary interrogative :mod ( country :name ( name :op1 wouldn ) ) :polarity -
(g / guangdong :ARG2-of (t / trade-01 :ARG1 (f2 / frequent-02 :ARG1 (s2 / spotlight)) :ARG1-of (g2 / grow-01 :ARG2-of (c / contribute-01 :ARG1 (e / export-01 :ARG1 (p / product :mod (t2 / tech :ARG1-of (h / high-02)))) :ARG1-of (s / significant-02 :compared-to (f / foreign))))))
(d / describe-01
:ARG0 (h / he)
:ARG1 (s / she)
:ARG2 (g / genius))
\ No newline at end of file
she was a genius, according to his description
his description of her: genius
\ No newline at end of file
{
"loader": "loaders/gigaword.py",
"dataset": "data/train-data/gigaword",
"indices": "data/train-data/gigaword/indices.npy",
"model_dir": "data/models/gigaword-L10_1500",
"verbose": true,
"print_every": 1,
"eval_every": 500,
"save_every": 500,
"max_val_steps": 1500,
"max_train_seconds": null,
"max_train_steps": 1500,
"batch_size": 4,
"learning_rate": 1e-05,
"k_samples": 100,
"sample_aggregation": "max",
"loss": "pgb",
"encoder_model_id": "distilroberta-base",
"rewards": {
"Fluency": {
"weight": 1,
"type": "masked",
"model_id": "distilroberta-base",
"max_score": 40.0,
"norm": "max"
},
"BiEncoderSimilarity": {
"weight": 1,
"model_id": "all-distilroberta-v1"
},
"GaussianLength": {
"weight": 1,
"mean": 10,
"std": 3.2
}
}
}
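The JSON above is a complete training configuration for the gigaword-L10_1500 model. A minimal sketch of loading it (the path is hypothetical, not taken from this commit):

# Minimal sketch: read the training config above; the path is an assumption.
import json

with open("config/gigaword-L10_1500.json") as f:  # hypothetical location
    cfg = json.load(f)

print(cfg["max_train_steps"])            # 1500
print(cfg["rewards"]["GaussianLength"])  # {'weight': 1, 'mean': 10, 'std': 3.2}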
@@ -5,11 +5,11 @@
   "model_dir": "data/models/gigaword-L10_1500_then_AMR",
   "verbose": true,
   "print_every": 1,
-  "eval_every": 50,
-  "save_every": 50,
-  "max_val_steps": 2100,
+  "eval_every": 100,
+  "save_every": 100,
+  "max_val_steps": 2000,
   "max_train_seconds": null,
-  "max_train_steps": 2100,
+  "max_train_steps": 2000,
   "batch_size": 4,
   "learning_rate": 1e-05,
   "k_samples": 100,
@@ -30,6 +30,7 @@ TOKENIZER = "distilroberta-base"
 MODEL_NAME_GIGAWORD_L8 = "gigaword-L8"
 MODEL_NAME_GIGAWORD_L8_test = "gigaword-L8_test"
 MODEL_NAME_GIGAWORD_L10_AMR = "gigaword-L10_AMR"
+MODEL_NAME_GIGAWORD_L10_1500 = "gigaword-L10_1500"
 MODEL_NAME_GIGAWORD_L10_2000 = "gigaword-L10_2000"
 MODEL_NAME_GIGAWORD_L10_2500 = "gigaword-L10_2500"
 MODEL_NAME_GIGAWORD_L10_3000 = "gigaword-L10_3000"
@@ -56,7 +57,7 @@ MODEL_NAME_P75 = "newsroom-P75"
 # TODO The Model-Names in this List will be trained
 # MODEL_TO_TRAIN_LIST = [MODEL_NAME_GIGAWORD_L10_2000_THEN_AMR]
-MODEL_TO_TRAIN_LIST = [MODEL_NAME_GIGAWORD_L10_2000_THEN_AMR]
+MODEL_TO_TRAIN_LIST = [MODEL_NAME_GIGAWORD_L10_1500_THEN_AMR]
 # TODO The Model-Names in this List will be used for Baseline
 LIST_MODEL_NAME = [MODEL_NAME_GIGAWORD_L10_2000_THEN_AMR_300, MODEL_NAME_GIGAWORD_L10_2000_THEN_AMR_600, MODEL_NAME_GIGAWORD_L10_1500_THEN_AMR_300, MODEL_NAME_GIGAWORD_L10_1500_THEN_AMR_600]
@@ -281,12 +281,12 @@ def write_string_to_file(path, name, model_name, amr_results_summaries, list_rou
     tmp += "\n\t\tAMR:\t\t\t" + str(np.mean(amr_score_gold_baseline))
     tmp += "\n\n2. Summary/Source -- Does the model do what it should?"
-    tmp += "\n\tModel AMR:"
+    tmp += "\n\tModel AMR+SMATCH:"
     tmp += "\n\t\tF1:\t\t\t\t" + str(np.mean(summe_amr_summary_source_f1))
     tmp += "\n\t\tPrecision:\t\t" + str(np.mean(summe_amr_summary_source_precision))
     tmp += "\n\t\tRecall:\t\t\t" + str(np.mean(summe_amr_summary_source_recall))
-    tmp += "\n\n\tBaseline AMR:"
+    tmp += "\n\n\tBaseline AMR+SMATCH:"
     tmp += "\n\t\tF1:\t\t\t\t" + str(np.mean(summe_amr_baseline_source_f1))
     tmp += "\n\t\tPrecision:\t\t" + str(np.mean(summe_amr_baseline_source_precision))
     tmp += "\n\t\tRecall:\t\t\t" + str(np.mean(summe_amr_baseline_source_recall))
@@ -298,7 +298,7 @@ def write_string_to_file(path, name, model_name, amr_results_summaries, list_rou
     tmp += "\n\t\tst.dev.:\t\t" + str(stdev(summe_rouge_1_fmeasure_summary_baseline))
     tmp += "\n\t\tvariance:\t\t" + str(variance(summe_rouge_1_fmeasure_summary_baseline))
-    tmp += "\n\n\tAMR:"
+    tmp += "\n\n\tAMR+SMATCH:"
     tmp += "\n\t\tmean:\t\t\t" + str(np.mean(amr_score_summary_baseline))
     tmp += "\n\t\tmedian:\t\t\t" + str(median(amr_score_summary_baseline))
     tmp += "\n\t\tst.dev.:\t\t" + str(stdev(amr_score_summary_baseline))
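The relabeling from "AMR" to "AMR+SMATCH" above reflects that these F1/precision/recall values are SMATCH scores between AMR pairs. A hedged sketch of the computation, using the smatch PyPI package rather than this repository's own wrapper:

# Hedged sketch: SMATCH precision/recall/F1 for two PENMAN AMR strings,
# via the `smatch` package (pip install smatch); not this repo's code.
import smatch

amr_summary = "(d / describe-01 :ARG0 (h / he) :ARG1 (s / she) :ARG2 (g / genius))"
amr_source = "(d / describe-01 :ARG0 (h / he) :ARG1 (s / she))"

match_num, test_num, gold_num = smatch.get_amr_match(amr_summary, amr_source)
precision, recall, f1 = smatch.compute_f(match_num, test_num, gold_num)
print(precision, recall, f1)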