#!/usr/bin/env python
# coding: utf-8

# ## Principle

# $$Score = \left( \sum_{n=1}^{N} \frac{\sum_{\text{all } w_1 \ldots w_n \text{ that co-occur}} Info(w_1 \ldots w_n)}{\sum_{\text{all } w_1 \ldots w_n \text{ in sys output}} (1)} \right) \cdot \exp \left\{ \beta \log^2 \left[ \min \left( \frac{L_{sys}}{\overline{L_{ref}}},\ 1 \right) \right] \right\}$$

# $$Info(w_1 \ldots w_n) = \log_2 \frac{\text{\# of occurrences of } w_1 \ldots w_{n-1}}{\text{\# of occurrences of } w_1 \ldots w_n}$$

# where:
#
# - $\beta$ is a constant (an empirical value) chosen so that the length penalty equals 0.5 when $L_{hyp}/L_{ref} = 2/3$
# - $\overline{L_{ref}}$ is the average length of the reference translations
#
# $\beta$ is derived as follows:
#
# set $e^{\beta \ln^2(2/3)} = 1/2$ and take the natural logarithm of both sides:
#
# $\beta = \frac{\ln(1/2)}{\ln^2(2/3)}$
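# As a quick numerical check of the derivation above (a small sketch, not part of the reference implementation): $\beta \approx -4.22$, and substituting it back in yields a penalty of exactly 0.5 at a 2/3 length ratio.

# In[ ]:


import math

# beta = ln(1/2) / ln^2(2/3)
beta = math.log(1 / 2) / (math.log(2 / 3) ** 2)
print(beta)                                   # ≈ -4.2162
print(math.exp(beta * math.log(2 / 3) ** 2))  # 0.5 by construction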
# ## Reference Code

# In[59]:


hypothese1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
              'ensures', 'that', 'the', 'military', 'always',
              'obeys', 'the', 'commands', 'of', 'the', 'party']
hypothese2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
              'forever', 'hearing', 'the', 'activity', 'guidebook',
              'that', 'party', 'direct']
reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
              'ensures', 'that', 'the', 'military', 'will', 'forever',
              'heed', 'Party', 'commands']
reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
              'guarantees', 'the', 'military', 'forces', 'always',
              'being', 'under', 'the', 'command', 'of', 'the',
              'Party']
reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
              'army', 'always', 'to', 'heed', 'the', 'directions',
              'of', 'the', 'party']


# In[199]:


import math
import fractions
from collections import Counter

import nltk
from nltk.util import ngrams


# In[4]:


# Compute the NIST score for a single sentence.
def sentence_nist(references, hypothesis, n=5):
    """
    Calculate NIST score from
    George Doddington. 2002. "Automatic evaluation of machine translation
    quality using n-gram co-occurrence statistics." Proceedings of HLT.
    Morgan Kaufmann Publishers Inc. http://dl.acm.org/citation.cfm?id=1289189.1289273

    DARPA commissioned NIST to develop an MT evaluation facility based on
    the BLEU score. The official script used by NIST to compute BLEU and
    NIST score is mteval-14.pl. The main differences are:

     - BLEU uses geometric mean of the ngram overlaps, NIST uses arithmetic mean.
     - NIST has a different brevity penalty
     - NIST score from mteval-14.pl has a self-contained tokenizer

    Note: The mteval-14.pl includes a smoothing function for BLEU score that
    is NOT used in the NIST score computation.

    >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
    ...                'ensures', 'that', 'the', 'military', 'always',
    ...                'obeys', 'the', 'commands', 'of', 'the', 'party']

    >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
    ...                'forever', 'hearing', 'the', 'activity', 'guidebook',
    ...                'that', 'party', 'direct']

    >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
    ...               'ensures', 'that', 'the', 'military', 'will', 'forever',
    ...               'heed', 'Party', 'commands']

    >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
    ...               'guarantees', 'the', 'military', 'forces', 'always',
    ...               'being', 'under', 'the', 'command', 'of', 'the',
    ...               'Party']

    >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
    ...               'army', 'always', 'to', 'heed', 'the', 'directions',
    ...               'of', 'the', 'party']

    >>> sentence_nist([reference1, reference2, reference3], hypothesis1) # doctest: +ELLIPSIS
    3.3709...

    >>> sentence_nist([reference1, reference2, reference3], hypothesis2) # doctest: +ELLIPSIS
    1.4619...

    :param references: reference sentences
    :type references: list(list(str))
    :param hypothesis: a hypothesis sentence
    :type hypothesis: list(str)
    :param n: highest n-gram order
    :type n: int
    """
    return corpus_nist([references], [hypothesis], n)


# In[203]:


# Length penalty factor.
# beta is an empirical value chosen so that the penalty is 0.5 when
# hyp_len / ref_len = 2/3.
def nist_length_penalty(ref_len, hyp_len):
    """
    Calculates the NIST length penalty, from Eq. 3 in Doddington (2002)

        penalty = exp( beta * log( min( len(hyp)/len(ref), 1.0 )) ** 2 )

    where `beta` is chosen to make the brevity penalty factor = 0.5 when the
    no. of words in the system output (hyp) is 2/3 of the average no. of
    words in the reference translation (ref)

    The NIST penalty is different from BLEU's such that it minimizes the
    impact on the score of small variations in the length of a translation.
    See Fig. 4 in Doddington (2002)
    """
    ratio = hyp_len / ref_len
    if 0 < ratio < 1:
        # ratio_x was originally 1.5 in the NLTK source; since
        # ln^2(1.5) == ln^2(2/3), beta comes out the same.
        ratio_x, score_x = 2 / 3, 1 / 2
        beta = math.log(score_x) / (math.log(ratio_x) ** 2)
        return math.exp(beta * math.log(ratio) ** 2)
    else:  # ratio <= 0 or ratio >= 1
        return max(min(ratio, 1.0), 0.0)
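# A quick usage check of the two regimes: at a 2/3 ratio the penalty is 0.5 by construction, and once the hypothesis is at least as long as the reference, the `else` branch caps the penalty at 1.0 (no bonus for long output).

# In[ ]:


print(nist_length_penalty(3, 2))    # ratio = 2/3 -> 0.5 by construction
print(nist_length_penalty(84, 90))  # ratio > 1   -> capped at 1.0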
# In[189]:


def corpus_nist(list_of_references, hypotheses, n=5):
    """
    Calculate a single corpus-level NIST score (aka. system-level NIST) for
    all the hypotheses and their respective references.

    :param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses
    :type list_of_references: list(list(list(str))), [[[], []...]]
    :param hypotheses: a list of hypothesis sentences
    :type hypotheses: list(list(str)), [[]]
    :param n: highest n-gram order
    :type n: int
    """
    # Before proceeding to compute NIST, perform sanity checks.
    assert len(list_of_references) == len(
        hypotheses
    ), "The number of hypotheses and their reference(s) should be the same"

    # Collect the ngram counts from the reference sentences.
    ngram_freq = Counter()
    total_reference_words = 0
    # For each source sent, there's a list of reference sents.
    for references in list_of_references:
        for reference in references:
            # For each order of ngram, count the ngram occurrences.
            for i in range(1, n + 1):
                ngram_freq.update(ngrams(reference, i))
            total_reference_words += len(reference)

    # Compute the information weights based on the reference sentences.
    # Eqn 2 in Doddington (2002):
    # Info(w_1 ... w_n) = log_2 [ (# of occurrences of w_1 ... w_n-1) / (# of occurrences of w_1 ... w_n) ]
    information_weights = {}
    for _ngram in ngram_freq:  # w_1 ... w_n
        _mgram = _ngram[:-1]  # w_1 ... w_n-1
        # From https://github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v13a.pl#L546
        # it's computed as such:
        #     denominator = ngram_freq[_mgram] if _mgram and _mgram in ngram_freq
        #                   else denominator = total_reference_words
        #     information_weights[_ngram] = -1 * math.log(ngram_freq[_ngram]/denominator) / math.log(2)
        #
        # Mathematically, that is equivalent to the implementation below:
        if _mgram and _mgram in ngram_freq:
            numerator = ngram_freq[_mgram]
        else:
            # Unigrams are weighted against the total reference word count,
            # so rare words carry high information.
            numerator = total_reference_words
        information_weights[_ngram] = math.log(numerator / ngram_freq[_ngram], 2)

    # Micro-average.
    # For n = 1..5, accumulate the numerator and denominator values
    # (five numerators and five denominators in total).
    nist_precision_numerator_per_ngram = Counter()
    nist_precision_denominator_per_ngram = Counter()
    l_ref, l_sys = 0, 0
    # For each order of ngram.
    for i in range(1, n + 1):
        # Iterate through each hypothesis and their corresponding references.
        for references, hypothesis in zip(list_of_references, hypotheses):
            hyp_len = len(hypothesis)

            # Find reference with the best NIST score.
            nist_score_per_ref = []
            for reference in references:
                _ref_len = len(reference)
                # Counter of ngrams in hypothesis.
                hyp_ngrams = (
                    Counter(ngrams(hypothesis, i))
                    if len(hypothesis) >= i
                    else Counter()
                )
                ref_ngrams = (
                    Counter(ngrams(reference, i))
                    if len(reference) >= i
                    else Counter()
                )
                # Counter intersection keeps the smaller count for each ngram.
                ngram_overlaps = hyp_ngrams & ref_ngrams
                # Precision part of the score in Eqn 3
                _numerator = sum(
                    information_weights[_ngram] * count
                    for _ngram, count in ngram_overlaps.items()
                )
                _denominator = sum(hyp_ngrams.values())
                _precision = 0 if _denominator == 0 else _numerator / _denominator
                nist_score_per_ref.append(
                    (_precision, _numerator, _denominator, _ref_len)
                )
            # Best reference.
            # max() compares tuples element by element, so the priority is:
            # precision > numerator > denominator > reference length.
            precision, numerator, denominator, ref_len = max(nist_score_per_ref)
            nist_precision_numerator_per_ngram[i] += numerator
            nist_precision_denominator_per_ngram[i] += denominator
            l_ref += ref_len
            l_sys += hyp_len

    # Final NIST micro-average mean aggregation.
    nist_precision = 0
    for i in nist_precision_numerator_per_ngram:
        precision = (
            nist_precision_numerator_per_ngram[i]
            / nist_precision_denominator_per_ngram[i]
        )
        nist_precision += precision
    # Eqn 3 in Doddington (2002)
    return nist_precision * nist_length_penalty(l_ref, l_sys)


# ## Walkthrough

# In[190]:


# Reference ngram frequencies.
ngram_freq = Counter()
total_reference_words = 0
for references in [[reference1, reference2, reference3]]:
    for reference in references:
        for i in range(1, 5 + 1):
            ngram_freq.update(ngrams(reference, i))
        total_reference_words += len(reference)


# In[191]:


# Information weights.
information_weights = {}
for _ngram in ngram_freq:
    _mgram = _ngram[:-1]
    if _mgram and _mgram in ngram_freq:
        numerator = ngram_freq[_mgram]
    else:
        numerator = total_reference_words
    information_weights[_ngram] = math.log(numerator / ngram_freq[_ngram], 2)
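# Two spot checks of the weights against Eqn 2 (both values reappear in the tables below): "the" occurs 9 times among the 50 reference words, and "the military" occurs twice.

# In[ ]:


# Info("the") = log2(total_reference_words / count("the")) = log2(50/9) ≈ 2.47
print(information_weights[('the',)])

# Info("the military") = log2(count("the") / count("the military")) = log2(9/2) ≈ 2.17
print(information_weights[('the', 'military')])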
# In[192]:


corpus_nist([[reference1, reference2, reference3]], [hypothese1])


# In[195]:


# Manual recomputation for hypothese1, reading the SUM values off the tables
# below. l_ref = 16*3 + 18 + 18 (best reference per order: ref1 for n=1..3,
# ref2 for n=4..5); l_sys = 18*5.
nist_score = (51.71/18 + 6.75/17 + 1.58/16 + 0/15 + 0/14) * nist_length_penalty(16*3+18+18, 18*5)
nist_score


# 1-Gram
#
# | gram | hyp count | ref1 count | Info\*overlap | ref2 count | Info\*overlap | ref3 count | Info\*overlap |
# | --- | --- | --- | --- | --- | --- | --- | --- |
# | It | 1 | 1 | 4.06\*1 | 1 | 4.06\*1 | 1 | 4.06\*1 |
# | is | 1 | 1 | 4.06\*1 | 1 | 4.06\*1 | 1 | 4.06\*1 |
# | a | 1 | 1 | 5.64\*1 | 0 | 0 | 0 | 0 |
# | guide | 1 | 1 | 4.64\*1 | 0 | 0 | 1 | 4.64\*1 |
# | to | 1 | 1 | 4.64\*1 | 0 | 0 | 1 | 4.64\*1 |
# | action | 1 | 1 | 5.64\*1 | 0 | 0 | 0 | 0 |
# | which | 1 | 0 | 0 | 1 | 5.64\*1 | 0 | 0 |
# | ensures | 1 | 1 | 5.64\*1 | 0 | 0 | 0 | 0 |
# | that | 1 | 2 | 4.64\*1 | 0 | 0 | 0 | 0 |
# | the | 3 | 1 | 2.47\*1 | 4 | 2.47\*3 | 4 | 2.47\*3 |
# | military | 1 | 1 | 4.64\*1 | 1 | 4.64\*1 | 0 | 0 |
# | always | 1 | 0 | 0 | 1 | 4.64\*1 | 1 | 4.64\*1 |
# | obeys | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | commands | 1 | 1 | 5.64\*1 | 0 | 0 | 0 | 0 |
# | of | 1 | 0 | 0 | 1 | 4.64\*1 | 1 | 4.64\*1 |
# | party | 1 | 0 | 0 | 0 | 0 | 1 | 5.64\*1 |
# | **SUM** | **18** | **16** (len) | **51.71** | 18 (len) | 35.09 | 16 (len) | 39.73 |
#
# 2-Gram
#
# | gram | hyp count | ref1 count | Info\*overlap | ref2 count | Info\*overlap | ref3 count | Info\*overlap |
# | --- | --- | --- | --- | --- | --- | --- | --- |
# | It is | 1 | 1 | 0\*1 | 1 | 0\*1 | 1 | 0\*1 |
# | is a | 1 | 1 | 1.58\*1 | 0 | 0 | 0 | 0 |
# | a guide | 1 | 1 | 0\*1 | 0 | 0 | 0 | 0 |
# | guide to | 1 | 1 | 1\*1 | 0 | 0 | 0 | 0 |
# | to action | 1 | 1 | 1\*1 | 0 | 0 | 0 | 0 |
# | action which | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | which ensures | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | ensures that | 1 | 1 | 0\*1 | 0 | 0 | 0 | 0 |
# | that the | 1 | 1 | 1\*1 | 0 | 0 | 0 | 0 |
# | the military | 1 | 1 | 2.17\*1 | 1 | 2.17\*1 | 0 | 0 |
# | military always | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | always obeys | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | obeys the | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | the commands | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | commands of | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | of the | 1 | 0 | 0 | 1 | 0\*1 | 1 | 0\*1 |
# | the party | 1 | 0 | 0 | 0 | 0 | 1 | 3.17\*1 |
# | **SUM** | **17** | **16** (len) | **6.75** | 18 (len) | 2.17 | 16 (len) | 3.17 |
#
# 3-Gram
#
# | gram | hyp count | ref1 count | Info\*overlap | ref2 count | Info\*overlap | ref3 count | Info\*overlap |
# | --- | --- | --- | --- | --- | --- | --- | --- |
# | It is a | 1 | 1 | 1.58\*1 | 0 | 0 | 0 | 0 |
# | is a guide | 1 | 1 | 0\*1 | 0 | 0 | 0 | 0 |
# | a guide to | 1 | 1 | 0\*1 | 0 | 0 | 0 | 0 |
# | guide to action | 1 | 1 | 0\*1 | 0 | 0 | 0 | 0 |
# | to action which | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | action which ensures | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | which ensures that | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | ensures that the | 1 | 1 | 0\*1 | 0 | 0 | 0 | 0 |
# | that the military | 1 | 1 | 0\*1 | 0 | 0 | 0 | 0 |
# | the military always | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | military always obeys | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | always obeys the | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | obeys the commands | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | the commands of | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | commands of the | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | of the party | 1 | 0 | 0 | 0 | 0 | 1 | 1\*1 |
# | **SUM** | **16** | **16** (len) | **1.58** | 18 (len) | 0 | 16 (len) | 1 |
#
# 4-Gram
#
# | gram | hyp count | ref1 count | Info\*overlap | ref2 count | Info\*overlap | ref3 count | Info\*overlap |
# | --- | --- | --- | --- | --- | --- | --- | --- |
# | It is a guide | 1 | 1 | 0\*1 | 0 | 0 | 0 | 0 |
# | is a guide to | 1 | 1 | 0\*1 | 0 | 0 | 0 | 0 |
# | a guide to action | 1 | 1 | 0\*1 | 0 | 0 | 0 | 0 |
# | guide to action which | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | to action which ensures | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | action which ensures that | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | which ensures that the | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | ensures that the military | 1 | 1 | 0\*1 | 0 | 0 | 0 | 0 |
# | that the military always | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | the military always obeys | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | military always obeys the | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | always obeys the commands | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | obeys the commands of | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | the commands of the | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | commands of the party | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | **SUM** | **15** | 16 (len) | 0 | **18** (len) | **0** | 16 (len) | 0 |
#
# 5-Gram
#
# | gram | hyp count | ref1 count | Info\*overlap | ref2 count | Info\*overlap | ref3 count | Info\*overlap |
# | --- | --- | --- | --- | --- | --- | --- | --- |
# | It is a guide to | 1 | 1 | 0\*1 | 0 | 0 | 0 | 0 |
# | is a guide to action | 1 | 1 | 0\*1 | 0 | 0 | 0 | 0 |
# | a guide to action which | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | guide to action which ensures | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | to action which ensures that | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | action which ensures that the | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | which ensures that the military | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | ensures that the military always | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | that the military always obeys | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | the military always obeys the | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | military always obeys the commands | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | always obeys the commands of | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | obeys the commands of the | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | the commands of the party | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
# | **SUM** | **14** | 16 (len) | 0 | **18** (len) | **0** | 16 (len) | 0 |
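# The 1-gram SUM for ref1 in the tables above can also be reproduced programmatically (per-entry rounding in the table gives 51.71; the exact value is ≈ 51.74).

# In[ ]:


# Clip hypothesis unigram counts against ref1, then sum the information weights.
overlap = Counter(ngrams(hypothese1, 1)) & Counter(ngrams(reference1, 1))
print(sum(information_weights[g] * c for g, c in overlap.items()))  # ≈ 51.74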
# In[55]:


sentence_nist([reference1, reference2, reference3], hypothese1)


# In[202]:


# BLEU, for comparison
nltk.translate.bleu([reference1, reference2, reference3], hypothese1)


# In[141]:


sentence_nist([reference1, reference2, reference3], hypothese2)


# In[201]:


# BLEU, for comparison
nltk.translate.bleu([reference1, reference2, reference3], hypothese2)


# ## Summary
#
# The detailed walkthrough makes the differences from BLEU easy to see:
#
# - For each ngram order, BLEU takes, per gram, the maximum count across the references and clips the hypothesis count against it; NIST instead multiplies each overlapping gram count by a weight that is computed from all references using the Info formula
# - For each ngram order, BLEU forms a precision by summing the clipped counts and dividing by the total hypothesis gram count; NIST sums the weighted gram counts, divides by the hypothesis gram count, and then keeps the maximum over the references
# - Across ngram orders, BLEU combines the precisions with a weighted sum of their logarithms (a geometric mean); NIST simply adds them up (see the sketch at the end of this notebook)
# - The length penalties differ not only in formula: BLEU considers only the length of the reference closest to the hypothesis, whereas NIST accumulates the best reference's length at every ngram order

# ## References
#
# - [NIST](http://localhost:8888/notebooks/Yam/All4NLP/BLEU/NIST.ipynb)
# - [Automatic evaluation of machine translation quality using n-gram co-occurrence statistics](http://www.mt-archive.info/HLT-2002-Doddington.pdf)

# > A gripe: the formulas are genuinely hard to read…
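# To make the aggregation difference from the summary concrete, here is a minimal sketch using the per-order values for hypothese1 read off the tables above (these are NIST's information-weighted precisions, not BLEU's clipped precisions, so this only illustrates the aggregation behavior): an arithmetic sum tolerates empty higher orders, while a geometric mean collapses to zero.

# In[ ]:


precisions = [51.71 / 18, 6.75 / 17, 1.58 / 16, 0 / 15, 0 / 14]

# NIST-style arithmetic aggregation: the zero 4/5-gram terms contribute nothing.
print(sum(precisions))  # ≈ 3.37

# BLEU-style geometric mean over the same values: a single zero term wipes out
# the whole product, which is why BLEU relies on smoothing.
print(math.prod(precisions) ** (1 / len(precisions)))  # 0.0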