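# Gradio demo of the full QANom pipeline: detect deverbal nominalizations in a
# sentence and parse each one with QASRL-style question-answer pairs.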
import gradio as gr
import nltk
from qanom.qanom_end_to_end_pipeline import QANomEndToEndPipeline
from typing import List

models = ["kleinay/qanom-seq2seq-model-baseline", 
          "kleinay/qanom-seq2seq-model-joint"]
pipelines = {model: QANomEndToEndPipeline(model) for model in models}
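# One pipeline is instantiated per checkpoint so the demo can switch models.
# As consumed below, each pipeline call returns, per input sentence, a list of
# predicate-info dicts carrying (at least) 'predicate_idx', 'verb_form',
# 'predicate_detector_probability', and 'QAs' (each QA holding a 'question'
# and its 'answers').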


description = ("This is a demo of the full QANom pipeline: identifying deverbal nominalizations "
               "and parsing them with question-answer driven semantic role labeling (QASRL).")
title = "QANom End-to-End Pipeline Demo"
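# Each example is a (model, sentence, detection threshold) triple, matching the
# order of the Interface inputs defined below.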
examples = [[models[1], "the construction of the officer 's building finished right after the beginning of the destruction of the previous construction .", 0.7],
            [models[1], "The doctor asked about the progress in Luke 's treatment .", 0.75],
            [models[0], "The Veterinary student was interested in Luke 's treatment of sea animals .", 0.75],
            [models[1], "Some reviewers agreed that the criticism raised by the AC is mostly justified .", 0.5]]
            

input_sent_box_label = "Insert sentence here, or select from the examples below"
links = """<p style='text-align: center'>
<a href='https://www.qasrl.org' target='_blank'>QASRL Website</a>  |  <a href='https://huggingface.co/kleinay/qanom-seq2seq-model-baseline' target='_blank'>Model Repo at Huggingface Hub</a>
</p>"""


def call(model_name, sentence, detection_threshold):
    pipeline = pipelines[model_name]
    # Run the end-to-end pipeline on the single input sentence and take the
    # predicate-info list for that sentence.
    pred_infos = pipeline([sentence], detection_threshold=detection_threshold)[0]

    def pretty_qas(pred_info) -> List[str]:
        # Format each QA of a predicate as "question --- answer1;answer2;..."
        if not pred_info or not pred_info['QAs']:
            return []
        return [f"{qa['question']} --- {';'.join(qa['answers'])}"
                for qa in pred_info['QAs'] if qa is not None]

    all_qas = [qa for pred_info in pred_infos for qa in pretty_qas(pred_info)]
    if not pred_infos:
        pretty_qa_output = "NO NOMINALIZATION FOUND"
    elif not all_qas:
        pretty_qa_output = "NO QA GENERATED"
    else:
        pretty_qa_output = "\n".join(all_qas)

    # Also present the sentence with detected nominalizations highlighted.
    positives = [pred_info['predicate_idx'] for pred_info in pred_infos]
    def color(idx):
        # "transparent" (rather than None) keeps the inline CSS valid for non-predicate tokens.
        return "lightgreen" if idx in positives else "transparent"
    idx2verb = {d["predicate_idx"]: d["verb_form"] for d in pred_infos}
    idx2prob = {d["predicate_idx"]: d["predicate_detector_probability"] for d in pred_infos}
    def word_span(word, idx):
        # Detected predicates get a hover tooltip with the detection probability and the verbal form.
        tooltip = f'title="probability={idx2prob[idx]:.2}&#010;verb={idx2verb[idx]}"' if idx in idx2verb else ''
        return f'<span {tooltip} style="background-color: {color(idx)}">{word}</span>'
    html = '<span>' + ' '.join(word_span(word, idx) for idx, word in enumerate(sentence.split(" "))) + '</span>'
    return html, pretty_qa_output, pred_infos

# Wire up the demo UI: model choice, input sentence, and detection threshold in;
# highlighted sentence (HTML), formatted QAs, and raw pipeline output out.
iface = gr.Interface(fn=call,
                     inputs=[gr.Radio(choices=models, value=models[0], label="Model"),
                             gr.Textbox(placeholder=input_sent_box_label, label="Sentence", lines=4),
                             gr.Slider(minimum=0., maximum=1., step=0.01, value=0.5, label="Nominalization Detection Threshold")],
                     outputs=[gr.HTML(label="Detected Nominalizations"),
                              gr.Textbox(label="Generated QAs"),
                              gr.JSON(label="Raw Model Output")],
                     title=title,
                     description=description,
                     article=links,
                     examples=examples)
iface.launch()