Attention matrix:

https://github.com/rockingdingo/deepnlp/blob/r0.1.6/deepnlp/textsum/eval.py
the plot_attention(data, X_label=None, Y_label=None) function

#!/usr/bin/python
# -*- coding:utf-8 -*-
"""
Evaluation methods for summarization tasks, including BLEU and ROUGE scores.
Visualization of the attention mask matrix: plot_attention() method.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import sys
import time

import matplotlib
matplotlib.use('Agg')  # must be set before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt  # drawing heat map of attention weights
plt.rcParams['font.sans-serif'] = ['SimSun']  # set font family (SimSun supports Chinese labels)


def evaluate(X, Y, method="rouge_n", n=2):
    score = 0.0
    if method == "rouge_n":
        score = eval_rouge_n(X, Y, n)
    elif method == "rouge_l":
        score = eval_rouge_l(X, Y)
    elif method == "bleu":
        score = eval_bleu(X, Y, n)
    else:
        print("method not found")
        score = 0.0
    return score


def eval_bleu(y_candidate, y_reference, n=2):
    '''Args:
        y_candidate: list of words, machine generated prediction
        y_reference: list of lists, [[], [], ...], human generated reference lines
    Return:
        bleu score: double, modified (clipped) n-gram precision
    '''
    if type(y_reference[0]) != list:
        print('y_reference should be list of list')
        return
    m = len(y_reference)
    bleu_score = 0.0
    ngram_cand = generate_ngrams(y_candidate, n)
    total_cand_count = len(ngram_cand)
    ngram_ref_list = []  # list of n-grams for each reference sentence
    for i in range(m):
        ngram_ref_list.append(generate_ngrams(y_reference[i], n))
    total_clip_count = 0
    for gram in set(ngram_cand):
        # for each unique n-gram tuple in ngram_cand, calculate the clipped count
        cand_count = count_element(ngram_cand, gram)
        max_ref_count = 0  # max count of this n-gram over the reference sentences
        for i in range(m):  # n-gram count in reference sentence i
            num = count_element(ngram_ref_list[i], gram)
            max_ref_count = num if max_ref_count < num else max_ref_count
        total_clip_count += min(cand_count, max_ref_count)
    bleu_score = total_clip_count / total_cand_count
    return bleu_score


def count_element(lst, element):
    if element in lst:
        return lst.count(element)
    else:
        return 0


def eval_rouge_n(y_candidate, y_reference, n=2):
    '''Args:
        y_candidate: list of words, machine generated prediction
        y_reference: list of lists, [[], [], ...], human generated reference lines
    Return:
        rouge_n score: double, maximum of the pairwise ROUGE-N scores
    '''
    if type(y_reference[0]) != list:
        print('y_reference should be list of list')
        return
    m = len(y_reference)
    rouge_score = []
    ngram_cand = generate_ngrams(y_candidate, n)
    for i in range(m):
        ngram_ref = generate_ngrams(y_reference[i], n)
        num_match = count_match(ngram_cand, ngram_ref)
        rouge_score.append(num_match / len(ngram_ref))
    return max(rouge_score)


def generate_ngrams(input_list, n):
    '''zip(x, x[1:], x[2:], ... x[n-1:]), ends with the shortest list;
    wrapped in list() so it can be reused and len() works under Python 3'''
    return list(zip(*[input_list[i:] for i in range(n)]))


def count_match(listA, listB):
    match_list = [gram for gram in listA if gram in listB]
    return len(match_list)


def eval_rouge_l(y_candidate, y_reference):
    '''Args:
        y_candidate: list of words, machine generated prediction
        y_reference: list of lists, [[], [], ...], human generated reference lines
    Return:
        rouge_l score: double, F score of the longest common subsequence
    '''
    if type(y_reference[0]) != list:
        print('y_reference should be list of list')
        return
    K = len(y_reference)
    lcs_count = 0.0
    total_cand = len(y_candidate)  # total number of candidate words
    total_ref = 0.0  # total number of reference words
    for k in range(K):
        cur_lcs = LCS(y_candidate, y_reference[k])
        lcs_count += len(cur_lcs)
        total_ref += len(y_reference[k])
    recall = lcs_count / total_ref
    precision = lcs_count / total_cand
    beta = 8.0  # weighting coefficient, favours recall
    f1 = (1 + beta * beta) * precision * recall / (recall + beta * beta * precision)
    return f1


def LCS(X, Y):
    '''Get the elements of the longest common subsequence'''
    length, flag = calc_LCS(X, Y)
    common_seq_rev = []  # reversed sequence
    # trace back, starting from the end of X and Y
    start_token = "START"
    X_new = [start_token] + list(X)
    Y_new = [start_token] + list(Y)
    i = len(X_new) - 1
    j = len(Y_new) - 1
    while i >= 0 and j >= 0:
        if flag[i][j] == 1:
            common_seq_rev.append(X_new[i])
            i -= 1
            j -= 1
        elif flag[i][j] == 2:
            i -= 1  # i -> i-1
        else:
            j -= 1  # flag[i][j] == 3, j -> j-1
    common_seq = [common_seq_rev[len(common_seq_rev) - 1 - i] for i in range(len(common_seq_rev))]
    return common_seq


def calc_LCS(X, Y):
    '''Calculate the longest common subsequence.
    Get the length[][] matrix and flag[][] matrix of X and Y;
    length[i][j]: longest common subsequence length up to X[i] and Y[j];
    flag[i][j]: path of the LCS, 1: jump diagonal, 2: jump down i-1 -> i, 3: jump right j-1 -> j
    '''
    start_token = "START"
    X_new = [start_token] + list(X)  # add start token to the X sequence
    Y_new = [start_token] + list(Y)
    m = len(X_new)
    n = len(Y_new)
    # length and flag matrices of size (len(X) + 1) * (len(Y) + 1)
    length = [[0 for j in range(n)] for i in range(m)]
    flag = [[0 for j in range(n)] for i in range(m)]
    for i in range(1, m):
        for j in range(1, n):
            if X_new[i] == Y_new[j]:  # compare tokens
                length[i][j] = length[i-1][j-1] + 1
                flag[i][j] = 1  # diagonal
            else:
                if length[i-1][j] > length[i][j-1]:
                    length[i][j] = length[i-1][j]
                    flag[i][j] = 2  # (i-1) -> i
                else:
                    length[i][j] = length[i][j-1]
                    flag[i][j] = 3  # (j-1) -> j
    return length, flag


def plot_attention(data, X_label=None, Y_label=None):
    '''Plot the attention model heatmap
    Args:
        data: attn_matrix with shape [ty, tx], cut before 'PAD'
        X_label: list of size tx, encoder tags
        Y_label: list of size ty, decoder tags
    '''
    fig, ax = plt.subplots(figsize=(20, 8))  # set figure size
    heatmap = ax.pcolor(data, cmap=plt.cm.Blues, alpha=0.9)

    # Set axis labels
    if X_label is not None and Y_label is not None:
        # labels must be unicode; decode only if they are still byte strings
        X_label = [x if isinstance(x, str) else x.decode('utf-8') for x in X_label]
        Y_label = [y if isinstance(y, str) else y.decode('utf-8') for y in Y_label]

        xticks = range(0, len(X_label))
        ax.set_xticks(xticks, minor=False)  # major ticks
        ax.set_xticklabels(X_label, minor=False, rotation=45)

        yticks = range(0, len(Y_label))
        ax.set_yticks(yticks, minor=False)
        ax.set_yticklabels(Y_label, minor=False)

        ax.grid(True)

    # Save figure
    plt.title(u'Attention Heatmap')
    timestamp = int(time.time())
    file_name = 'img/attention_heatmap_' + str(timestamp) + ".jpg"
    print("Saving figure %s" % file_name)
    fig.savefig(file_name)  # save the figure to file (the img/ directory must exist)
    plt.close(fig)  # close the figure


def test():
    # strA = "ABCBDAB"
    # strB = "BDCABA"
    # m = LCS(strA, strB)
    # listA = ['但是', '我', '爱', '吃', '肉夹馍']
    # listB = ['我', '不是', '很', '爱', '肉夹馍']
    # m = LCS(listA, listB)
    y_candidate = ['我', '爱', '吃', '北京', '烤鸭']
    y_reference = [['我', '爱', '吃', '北京', '小吃', '烤鸭'],
                   ['他', '爱', '吃', '北京', '烤鹅'],
                   ['但是', '我', '很', '爱', '吃', '西湖', '醋鱼']]
    p1 = eval_rouge_l(y_candidate, y_reference)
    print("ROUGE-L score %f" % p1)
    p2 = eval_rouge_n(y_candidate, y_reference, 2)
    print("ROUGE-N score %f" % p2)
    p3 = eval_bleu(y_candidate, y_reference, 2)
    print("BLEU score %f" % p3)


if __name__ == "__main__":
    test()
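
For reference, a minimal usage sketch for plot_attention. It assumes the script above is saved locally as eval.py and that an img/ directory exists next to it (the function writes its heatmap there); the attention matrix and token lists are made-up illustration values, not output from the textsum model:

# Hypothetical usage sketch: plotting a fake attention matrix with plot_attention().
from eval import plot_attention  # assumes the script above was saved as eval.py

# Made-up [ty, tx] attention weights: 3 decoder steps attending over 4 encoder tokens.
attn_matrix = [[0.70, 0.10, 0.10, 0.10],
               [0.05, 0.80, 0.10, 0.05],
               [0.05, 0.05, 0.30, 0.60]]

encoder_tokens = ['我', '爱', '北京', '烤鸭']  # x axis, length tx
decoder_tokens = ['I', 'love', 'duck']         # y axis, length ty

# Writes img/attention_heatmap_<timestamp>.jpg (the img/ directory must exist).
plot_attention(attn_matrix, X_label=encoder_tokens, Y_label=decoder_tokens)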

self_attention:

https://github.com/kaushalshetty/Structured-Self-Attention/tree/master/visualization

#Credits to Lin Zhouhan(@hantek) for the complete visualization code
import random, os, numpy, scipy
from codecs import open


def createHTML(texts, weights, fileName):
    """
    Creates an html file with text heat.
    weights: attention weights for visualizing
    texts: text on which the attention weights are to be visualized
    """
    fileName = "visualization/" + fileName
    fOut = open(fileName, "w", encoding="utf-8")
    part1 = """
    <html lang="en">
    <head>
    <meta http-equiv="content-type" content="text/html; charset=utf-8">
    <style>
    body {
      font-family: Sans-Serif;
    }
    </style>
    </head>
    <body>
    <h3>Heatmaps</h3>
    </body>
    <script>
    """
    part2 = """
    var color = "255,0,0";
    var ngram_length = 3;
    var half_ngram = 1;
    for (var k = 0; k < any_text.length; k++) {
      var tokens = any_text[k].split(" ");
      var intensity = new Array(tokens.length);
      var max_intensity = Number.MIN_SAFE_INTEGER;
      var min_intensity = Number.MAX_SAFE_INTEGER;
      // smooth each token's weight over a surrounding n-gram window
      for (var i = 0; i < intensity.length; i++) {
        intensity[i] = 0.0;
        for (var j = -half_ngram; j < ngram_length - half_ngram; j++) {
          if (i + j < intensity.length && i + j > -1) {
            intensity[i] += trigram_weights[k][i + j];
          }
        }
        if (i == 0 || i == intensity.length - 1) {
          intensity[i] /= 2.0;
        } else {
          intensity[i] /= 3.0;
        }
        if (intensity[i] > max_intensity) {
          max_intensity = intensity[i];
        }
        if (intensity[i] < min_intensity) {
          min_intensity = intensity[i];
        }
      }
      // normalize intensities to [0, 1]
      var denominator = max_intensity - min_intensity;
      for (var i = 0; i < intensity.length; i++) {
        intensity[i] = (intensity[i] - min_intensity) / denominator;
      }
      if (k % 2 == 0) {
        var heat_text = "<p><br><b>Example:</b><br>";
      } else {
        var heat_text = "<b>Example:</b><br>";
      }
      var space = "";
      // render each token with a red background whose alpha equals its intensity
      for (var i = 0; i < tokens.length; i++) {
        heat_text += "<span style='background-color:rgba(" + color + "," + intensity[i] + ")'>" + space + tokens[i] + "</span>";
        if (space == "") {
          space = " ";
        }
      }
      //heat_text += "<p>";
      document.body.innerHTML += heat_text;
    }
    </script>
    </html>"""
    putQuote = lambda x: "\"%s\"" % x
    textsString = "var any_text = [%s];\n" % (",".join(map(putQuote, texts)))
    weightsString = "var trigram_weights = [%s];\n" % (",".join(map(str, weights)))
    fOut.write(part1)
    fOut.write(textsString)
    fOut.write(weightsString)
    fOut.write(part2)
    fOut.close()
    return
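
A small usage sketch for createHTML, assuming the code above is saved as visualize_attention.py and that a visualization/ directory already exists (the function prefixes the output path with it). The sentences and per-token weights below are made-up illustration values; each row of weights lines up with the whitespace-separated tokens of the corresponding sentence:

# Hypothetical usage sketch: rendering token-level attention as an HTML heatmap.
from visualize_attention import createHTML  # assumes the code above was saved under this name

texts = ["the movie was surprisingly good",
         "the plot made no sense at all"]

# One attention weight per token; rows align with the sentences in `texts`.
weights = [[0.05, 0.10, 0.15, 0.60, 0.10],
           [0.10, 0.55, 0.15, 0.05, 0.05, 0.10]]

# Writes visualization/heatmap.html; open it in a browser to see the highlighted tokens.
createHTML(texts, weights, "heatmap.html")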

Related reading on attention visualization:

  1. [NLP] An interactive attention visualization tool: is there hope for Transformer interpretability yet? (by Sherry, 夕小瑶的卖萌屋)

  2. NLP Newsletter (Issue #4): Turing-NLG, REALM, ERNIE-GEN, Transformer attention visualization, and more

  3. attention / seq2seq / transformer / BERT study notes (2020-11-07): https://blog.csdn.net/weixin_44388679/article/details/102575223

  4. The past and present of the attention mechanism (PaperWeekly, by Ma Minbo)

  5. A classic revisited: "Attention Is All You Need" explained in detail: https://blog.csdn.net/michaelshare/article/details/124178685

  6. Systematic NLP study (24): Transformer in detail (Attention Is All You Need): https://zhuanlan.zhihu.com/p/48508221, http://jalammar.github.io/illustrated-transformer/

  7. "The Annotated Transformer" translated: annotations and a code implementation of "Attention Is All You Need"
