Lora committed
Commit · c289bbc
Parent(s): 6ae73ed
Initial Commit

app.py ADDED
@@ -0,0 +1,110 @@
import torch
import pandas as pd
import transformers
import gradio as gr


# def visualize_word(word, tokenizer, vecs, lm_head, count=5, contents=None):
def visualize_word(word, count=10, remove_space=False):

    if not remove_space:
        word = ' ' + word
    print(f"Looking up word ['{word}']")

    # Loading the tokenizer and tensors on every call is wasteful, but there is
    # no obvious way to pass a non-interface object into a Gradio callback here.
    tokenizer = transformers.AutoTokenizer.from_pretrained('gpt2')
    vecs = torch.load("senses/all_vecs_mtx.pt")
    lm_head = torch.load("senses/lm_head.pt")
    print("lm_head.shape = ", lm_head.shape)

    token_ids = tokenizer(word)['input_ids']
    tokens = [tokenizer.decode(token_id) for token_id in token_ids]
    tokens = ", ".join(tokens)
    # Look up sense vectors only for the first token.
    contents = vecs[token_ids[0]]  # torch.Size([16, 768])

    sense_names = []
    pos_sense_word_lists = []
    neg_sense_word_lists = []

    # Score every vocabulary item against each sense vector, then keep the
    # highest- and lowest-scoring words per sense.
    for i in range(contents.shape[0]):
        logits = contents[i, :] @ lm_head.t()  # [768] @ [768, 50257] -> [50257]
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        sense_names.append('sense {}'.format(i + 1))

        # currently a lot of repetition
        pos_sorted_words = [tokenizer.decode(sorted_indices[j]) for j in range(count)]
        pos_sorted_logits = [sorted_logits[j].item() for j in range(count)]
        pos_word_list = list(zip(pos_sorted_words, pos_sorted_logits))
        pos_sense_word_lists.append(pos_word_list)

        neg_sorted_words = [tokenizer.decode(sorted_indices[-j - 1]) for j in range(count)]
        neg_sorted_logits = [sorted_logits[-j - 1].item() for j in range(count)]
        neg_word_list = list(zip(neg_sorted_words, neg_sorted_logits))
        neg_sense_word_lists.append(neg_word_list)

    pos_data = dict(zip(sense_names, pos_sense_word_lists))
    pos_df = pd.DataFrame(index=list(range(count)),
                          columns=list(pos_data.keys()))
    for prop, word_list in pos_data.items():
        for i, word_pair in enumerate(word_list):
            cell_value = "{} ({:.2f})".format(word_pair[0], word_pair[1])
            pos_df.at[i, prop] = cell_value

    neg_data = dict(zip(sense_names, neg_sense_word_lists))
    neg_df = pd.DataFrame(index=list(range(count)),
                          columns=list(neg_data.keys()))
    for prop, word_list in neg_data.items():
        for i, word_pair in enumerate(word_list):
            cell_value = "{} ({:.2f})".format(word_pair[0], word_pair[1])
            neg_df.at[i, prop] = cell_value

    return pos_df, neg_df, tokens


# argp = argparse.ArgumentParser()
# argp.add_argument('vecs_path')
# argp.add_argument('lm_head_path')
# args = argp.parse_args()

# Load tokenizer and parameters
# tokenizer = transformers.AutoTokenizer.from_pretrained('gpt2')
# vecs = torch.load(args.vecs_path)
# lm_head = torch.load(args.lm_head_path)

# visualize_word(input('Enter a word:'), tokenizer, vecs, lm_head, count=5)
# visualize_word("fish", vecs, lm_head, count=COUNT)

# Gradio UI: one row of inputs, plus two DataFrames of per-sense results.
with gr.Blocks() as demo:
    gr.Markdown("""
    ## Backpack visualization: senses lookup
    > Note: Backpack uses the GPT-2 tokenizer, which includes the space before a word as part of the token, so by default a space character `' '` is added to the beginning of the word you look up. You can disable this by checking `Remove space before word`, but note that this may cause strange behavior, like breaking `afraid` into `af` and `raid`, or `slight` into `s` and `light`.
    """)
    with gr.Row():
        word = gr.Textbox(label="Word")
        token_breakdown = gr.Textbox(label="Token Breakdown (senses are for the first token only)")
        remove_space = gr.Checkbox(label="Remove space before word", value=False)
        count = gr.Slider(minimum=1, maximum=20, value=10, label="Top K", step=1)
    # sentence = gr.Textbox(label="Sentence")
    pos_outputs = gr.Dataframe(label="Highest Scoring Senses")
    neg_outputs = gr.Dataframe(label="Lowest Scoring Senses")
    gr.Examples(
        examples=["science", "afraid", "book", "slight"],
        inputs=[word],
        outputs=[pos_outputs, neg_outputs, token_breakdown],
        fn=visualize_word,
        # cache_examples=True,
    )

    gr.Button("Look up").click(
        fn=visualize_word,
        inputs=[word, count, remove_space],
        outputs=[pos_outputs, neg_outputs, token_breakdown],
    )

    # sentence.select(
    #     fn=visualize_word,
    #     inputs=[word, count],
    #     outputs=[pos_outputs, neg_outputs],
    # )

demo.launch(share=False)
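
The in-code comment about reloading the tokenizer and sense tensors on every call can be sidestepped by loading them once at module scope and letting the callback close over those globals, since `gr.Button.click` only needs the function itself. Below is a minimal, untested sketch of that variant; the names `TOKENIZER`, `VECS`, `LM_HEAD`, and `top_sense_words` are illustrative only, and it assumes the same `senses/all_vecs_mtx.pt` and `senses/lm_head.pt` files that app.py loads.

import torch
import transformers

# Hypothetical sketch: load once at import time; the callback closes over
# these module-level objects, so nothing extra is passed through Gradio.
TOKENIZER = transformers.AutoTokenizer.from_pretrained('gpt2')
VECS = torch.load("senses/all_vecs_mtx.pt")   # same files app.py loads
LM_HEAD = torch.load("senses/lm_head.pt")

def top_sense_words(word, count=10, remove_space=False):
    # Same core lookup as visualize_word, minus the DataFrame formatting:
    # score the vocabulary against each sense vector of the first token.
    if not remove_space:
        word = ' ' + word
    token_ids = TOKENIZER(word)['input_ids']
    senses = VECS[token_ids[0]]          # (num_senses, hidden), e.g. [16, 768]
    logits = senses @ LM_HEAD.t()        # (num_senses, vocab)
    top = torch.topk(logits, k=count, dim=-1)
    return [[TOKENIZER.decode(int(i)) for i in row] for row in top.indices]

With this arrangement, the click handler could reference top_sense_words (or a DataFrame-producing wrapper around it) directly.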