#-*- coding:utf-8 -*-
"""
Corpus-based Chinese word segmentation algorithm
Author: "Xylander"
"""
import os
import re
import math
import time
from entropy import compute_entropy
from extract import extract_cadicateword, gen_bigram
import pandas as pd
class wordinfo(object):
    '''
    Record the information of every candidate word, including its left
    neighbors, right neighbors, frequency and PMI
    '''
    def __init__(self, text):
        super(wordinfo, self).__init__()
        self.text = text
        self.freq = 0.0
        self.left = []  # record left neighbors
        self.right = []  # record right neighbors
        self.pmi = 0
    def update_data(self, left, right):
        self.freq += 1.0
        if left:
            self.left.append(left)
        if right:
            self.right.append(right)
    def compute_indexes(self, length):
        # compute the word's relative frequency and the entropy of its
        # left/right neighbors
        self.freq /= length
        self.left = compute_entropy(self.left)
        self.right = compute_entropy(self.right)
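        # compute_entropy (from the entropy module, not shown here) is assumed
        # to map the list of neighboring characters to its Shannon entropy, so
        # self.left and self.right are plain numbers from this point on (they
        # are compared with min() later)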
    def compute_pmi(self, words_dict):
        # compute PMI over every two-part split of the word and keep the minimum
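        # gen_bigram (from the extract module) is assumed to return every
        # (prefix, suffix) split of the text; e.g. for u'abc' it would yield
        # (u'a', u'bc') and (u'ab', u'c'). A low minimum means there is at
        # least one split point where the parts occur almost as often apart
        # as together, i.e. a weak word candidate.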
        sub_part = gen_bigram(self.text)
        if len(sub_part) > 0:
            self.pmi = min(map(lambda (left, right): math.log(self.freq / words_dict[left].freq / words_dict[right].freq), sub_part))
class segdocument(object):
    '''
    Main class for Chinese word segmentation
    1. Generate words from a sufficiently long document
    2. Segment the document with the generated words
    reference:
    '''
    def __init__(self, doc, max_word_len=5, min_tf=0.000005, min_entropy=0.07, min_pmi=6):
        super(segdocument, self).__init__()
        self.max_word_len = max_word_len
        self.min_tf = min_tf
        self.min_entropy = min_entropy
        self.min_pmi = min_pmi
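        # threshold semantics: min_tf is a relative frequency (count divided
        # by document length); min_entropy applies to the weaker side of the
        # left/right neighbor entropies; min_pmi is in natural-log units
        # (math.log)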
        # analyse the document
        self.word_info = self.gen_words(doc)
        count = float(len(self.word_info))
        self.avg_frq = sum(map(lambda w: w.freq, self.word_info)) / count
        self.avg_entropy = sum(map(lambda w: min(w.left, w.right), self.word_info)) / count
        self.avg_pmi = sum(map(lambda w: w.pmi, self.word_info)) / count
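        # keep only multi-character candidates whose PMI, frequency and
        # weaker-side neighbor entropy all clear the configured thresholds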
        filter_function = lambda f: len(f.text) > 1 and f.pmi > self.min_pmi and f.freq > self.min_tf \
            and min(f.left, f.right) > self.min_entropy
        self.word_tf_pmi_ent = map(lambda w: (w.text, len(w.text), w.freq, w.pmi, min(w.left, w.right)),
                                   filter(filter_function, self.word_info))
    def gen_words(self, doc):
        #pattern = re.compile('[:“。”,!?、《》……;’‘\n——\r\t)、(——^[1-9]d*$]')
        #pattern = re.compile('[\s+\.\!\/_,$%^*(+\"\']+|[+——!,。??:、~@#”“¥:%……&*()]+|[[A-Za-z0-9]*$]'.decode('utf-8'))
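        # the active pattern strips whitespace, digits, ASCII letters and both
        # ASCII and full-width punctuation, leaving mostly the CJK text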
        pattern = re.compile(u'[\\s\\d,.<>/?:;\'\"[\\]{}()\\|~!@#$%^&*\\-_=+a-zA-Z,。《》、?:;“”‘’{}【】()…¥!—┄-]+')
        doc = pattern.sub(r'', doc)
        word_index = extract_cadicateword(doc, self.max_word_len)
        word_cad = {}  # dictionary of candidate words
        for suffix in word_index:
            word = doc[suffix[0]:suffix[1]]
            if word not in word_cad:
                word_cad[word] = wordinfo(word)
            # record the word's frequency together with its left and right neighbors
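            # the slices stay safe at the document boundaries: both
            # doc[suffix[0]-1:suffix[0]] and doc[suffix[1]:suffix[1]+1]
            # yield an empty string when the candidate touches the edge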
            word_cad[word].update_data(doc[suffix[0]-1:suffix[0]], doc[suffix[1]:suffix[1]+1])
        length = len(doc)
        # compute the frequency of each candidate word and the entropy of its
        # left/right neighbors
        for word in word_cad:
            word_cad[word].compute_indexes(length)
        # sort candidates by word length, ascending
        values = sorted(word_cad.values(), key=lambda x: len(x.text))
        for v in values:
            if len(v.text) == 1:
                continue
            v.compute_pmi(word_cad)
        # values is already sorted by word length, so no further sorting is needed
        return values
if __name__ == '__main__':
    starttime = time.clock()
    path = os.path.abspath('.')
    wordlist = []
    word_candidate = []
    dict_bank = []
    dict_path = path + '/dict.txt'
    decode_list = ['gb18030', 'gbk', 'utf-8', 'ISO-8859-2']  # candidate encodings, tried in turn
    for decode_way in decode_list:
        try:
            doc = open(path + '/guangkai.txt', 'r').read().decode(decode_way)
            print 'Great! {0} decoded the document successfully!'.format(decode_way)
            break
        except UnicodeDecodeError:
            print 'Oops! {0} cannot decode the document!'.format(decode_way)
    word = segdocument(doc, max_word_len=4, min_tf=0, min_entropy=0.05, min_pmi=3.3)
    print 'average frequency: ' + str(word.avg_frq)
    print 'average PMI: ' + str(word.avg_pmi)
    print 'average entropy: ' + str(word.avg_entropy)
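    # load the reference dictionary; each line is assumed to start with a
    # word, optionally followed by a space and extra fields (which are ignored)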
    for i in open(dict_path, 'r'):
        dict_bank.append(i.strip().split(' ')[0])
    print 'result:'
    for i in word.word_tf_pmi_ent:
        if i[0].encode('utf-8') not in dict_bank:
            word_candidate.append(i[0].encode('utf-8'))
            wordlist.append([i[0].encode('utf-8'), i[1], i[2], i[3], i[4]])
    seg = pd.DataFrame(wordlist, columns=['word', 'length', 'fre', 'pmi', 'entropy'])
    seg.to_csv(path + '/extractword.csv', index=False)
    # intersection = set(word_candidate) & set(dict_bank)
    # newwordset = set(word_candidate) - intersection
    for i in wordlist:
        print i[0], i[1], i[2], i[3], i[4]
    endtime = time.clock()
    print endtime - starttime