Right now, the biggest strength of input methods such as Sogou IME and Baidu IME is their high-quality dictionaries, and besides their use in search these dictionaries can be reused in other scenarios, word segmentation for example. Each input method ships its dictionary in its own format, so in this post we look at how to decompile these dictionaries and put them to use elsewhere.
Sogou IME dictionary parsing
The dictionaries Sogou IME offers for download come in the .scel format; convert them to plain txt before using them elsewhere. The conversion script is as follows:
```python
# -*- coding: utf-8 -*-
import struct
import sys

# A Sogou .scel dictionary is essentially text stored as UTF-16, two bytes per
# character (Chinese or Latin); the work is finding the offset of each part.
# There are two main parts:
#
# 1. The global pinyin table, apparently every pinyin syllable in dictionary
#    order, stored as a list of (index, len, pinyin):
#    index:  2-byte integer, index of this pinyin syllable
#    len:    2-byte integer, byte length of the pinyin string
#    pinyin: the pinyin itself, two bytes per character, len bytes in total
#
# 2. The Chinese word table, a list of
#    (same, py_table_len, py_table, {word_len, word, ext_len, ext}):
#    same:         2-byte integer, number of homophones
#    py_table_len: 2-byte integer
#    py_table:     list of 2-byte integers, each the index of a pinyin syllable
#
#    word_len: 2-byte integer, byte length of the Chinese word
#    word:     the Chinese word, two bytes per character, word_len bytes in total
#    ext_len:  2-byte integer, length of the extension data, seemingly always 10
#    ext:      the first two bytes look like an integer (possibly the frequency),
#              the remaining eight bytes are all zero
#
#    {word_len, word, ext_len, ext} repeats `same` times: homophones share one pinyin table.

# offset of the pinyin table
startPy = 0x1540
# offset of the Chinese word table
startChinese = 0x2628
# global pinyin table
GPy_Table = {}
# parsing result: a list of (frequency, pinyin, word) tuples
GTable = []


def byte2str(data):
    '''Convert raw UTF-16LE bytes to a unicode string.'''
    i = 0
    length = len(data)
    ret = u''
    while i < length:
        x = data[i] + data[i+1]
        t = unichr(struct.unpack('H', x)[0])
        if t == u'\r':
            ret += u'\n'
        elif t != u' ':
            ret += t
        i += 2
    return ret


# parse the pinyin table
def getPyTable(data):
    if data[0:4] != "\x9D\x01\x00\x00":
        return None
    data = data[4:]
    pos = 0
    length = len(data)
    while pos < length:
        index = struct.unpack('H', data[pos] + data[pos+1])[0]
        # print index,
        pos += 2
        l = struct.unpack('H', data[pos] + data[pos+1])[0]
        # print l,
        pos += 2
        py = byte2str(data[pos:pos+l])
        # print py
        GPy_Table[index] = py
        pos += l


# get the pinyin of one word from its pinyin index table
def getWordPy(data):
    pos = 0
    length = len(data)
    ret = u''
    while pos < length:
        index = struct.unpack('H', data[pos] + data[pos+1])[0]
        ret += GPy_Table[index]
        pos += 2
    return ret


# get one word (unused; identical to getWordPy)
def getWord(data):
    pos = 0
    length = len(data)
    ret = u''
    while pos < length:
        index = struct.unpack('H', data[pos] + data[pos+1])[0]
        ret += GPy_Table[index]
        pos += 2
    return ret


# read the Chinese word table
def getChinese(data):
    # import pdb
    # pdb.set_trace()
    pos = 0
    length = len(data)
    while pos < length:
        # number of homophones
        same = struct.unpack('H', data[pos] + data[pos+1])[0]
        # print '[same]:', same,

        # length of the pinyin index table
        pos += 2
        py_table_len = struct.unpack('H', data[pos] + data[pos+1])[0]

        # pinyin index table
        pos += 2
        py = getWordPy(data[pos:pos + py_table_len])

        # the Chinese words follow
        pos += py_table_len
        for i in xrange(same):
            # length of the Chinese word
            c_len = struct.unpack('H', data[pos] + data[pos+1])[0]
            # the Chinese word itself
            pos += 2
            word = byte2str(data[pos:pos + c_len])
            # length of the extension data
            pos += c_len
            ext_len = struct.unpack('H', data[pos] + data[pos+1])[0]
            # word frequency
            pos += 2
            count = struct.unpack('H', data[pos] + data[pos+1])[0]
            # save the entry
            GTable.append((count, py, word))
            # skip to the next entry
            pos += ext_len


def deal(file_name):
    print '-' * 60
    f = open(file_name, 'rb')
    data = f.read()
    f.close()

    if data[0:12] != "\x40\x15\x00\x00\x44\x43\x53\x01\x01\x00\x00\x00":
        print "确认你选择的是搜狗(.scel)词库?"
        sys.exit(0)
    # pdb.set_trace()

    print "词库名:", byte2str(data[0x130:0x338])      # .encode('GB18030')
    print "词库类型:", byte2str(data[0x338:0x540])    # .encode('GB18030')
    print "描述信息:", byte2str(data[0x540:0xd40])    # .encode('GB18030')
    print "词库示例:", byte2str(data[0xd40:startPy])  # .encode('GB18030')

    getPyTable(data[startPy:startChinese])
    getChinese(data[startChinese:])


if __name__ == '__main__':
    # just add the dictionaries you want to convert here
    f = "搜狗标准词库.scel".decode("utf-8")
    deal(f)

    # save the result
    f = open('sougou.txt', 'w')
    for count, py, word in GTable:
        # GTable holds the result, a list of (frequency, pinyin, word) tuples;
        # save it in whatever format you need. Entries are not sorted, so they
        # keep the order of the input file.
        f.write(unicode('{%(count)s}' % {'count': count} + py + ' ' + word).encode('GB18030'))  # output encoding, change as needed
        f.write('\n')
    f.close()
```
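Once the script has written `sougou.txt`, the exported entries can be reused elsewhere, for example as a custom word list for a segmenter as mentioned at the top. Below is a minimal sketch of reading that file back, assuming the `{frequency}pinyin word` line format and GB18030 encoding produced by the script above; the output file name `words.txt` is just an example, not something from the original article.

```python
# -*- coding: utf-8 -*-
# Read back sougou.txt ("{frequency}pinyin word" per line, GB18030-encoded)
# and dump only the words, e.g. for a segmenter's user dictionary.
import codecs

entries = []
with codecs.open('sougou.txt', 'r', 'GB18030') as f:
    for line in f:
        line = line.strip()
        if not line:
            continue
        # "{1234}beijing 北京" -> "{1234}beijing" and "北京"
        head, _, word = line.partition(u' ')
        count = int(head[1:head.index(u'}')])   # frequency between "{" and "}"
        pinyin = head[head.index(u'}') + 1:]    # concatenated pinyin after "}"
        entries.append((count, pinyin, word))

# words.txt is an illustrative output name
with codecs.open('words.txt', 'w', 'utf-8') as out:
    for count, pinyin, word in entries:
        out.write(word + u'\n')
```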
Baidu IME dictionary parsing
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import struct
import sys

from tools import *


class bdict(BaseDictFile):

    def __init__(self):
        BaseDictFile.__init__(self)
        # file magic
        self.head = 'biptbdsw'
        # offset of the field that stores the end position of the word table
        self.offset = 0x60
        self.end_position = 0
        # offset where the word table starts
        self.dict_start = 0x350
        # pinyin initials, indexed by the first byte of each syllable
        self.shengmu = ["c", "d", "b", "f", "g", "h", "ch", "j", "k", "l", "m", "n",
                        "", "p", "q", "r", "s", "t", "sh", "zh", "w", "x", "y", "z"]
        # pinyin finals, indexed by the second byte of each syllable
        self.yunmu = ["uang", "iang", "iong", "ang", "eng", "ian", "iao", "ing", "ong",
                      "uai", "uan", "ai", "an", "ao", "ei", "en", "er", "ua", "ie", "in",
                      "iu", "ou", "ia", "ue", "ui", "un", "uo", "a", "e", "i", "o", "u", "v"]

    def _get_word_len(self, data, pos=0):
        # word length (in characters)
        length = struct.unpack('I', data[pos:pos+4])[0]
        char = struct.unpack('B', data[pos+4])[0]
        pure_english = False
        if 0x41 <= char <= 0x7a:
            pure_english = True
        # print(repr(data[pos:pos+length*3+1]))
        return (length, pure_english)

    def _get_word(self, data, length=0, pure_english=False):
        pos = 0
        word = Word()
        pinyin = []
        for i in xrange(length):
            char = struct.unpack('B', data[pos])[0]
            pos += 1
            if pure_english:
                # if the first byte falls in the letter range (0x41~0x7a) this is a
                # pure-English entry and the content can be read directly
                pinyin.append(chr(char).lower())
                word.value += chr(char)
            else:
                if char == 0xff:
                    # an initial of '\xff' marks an English letter inside a mixed
                    # Chinese/English entry; no pinyin table lookup is needed,
                    # just read the "final" byte as the letter itself
                    sm = ''
                    ym = struct.unpack('c', data[pos])[0]
                    pos += 1
                    pinyin.append('' + ym)
                else:
                    sm = char
                    ym = struct.unpack('B', data[pos])[0]
                    pos += 1
                    pinyin.append(self.shengmu[sm] + self.yunmu[ym])
                    # try:
                    #     pinyin.append(self.shengmu[sm] + self.yunmu[ym])
                    # except IndexError:
                    #     print(repr(data))
                    #     raise IndexError
        if pure_english:
            word.pinyin = ''.join(pinyin)
        else:
            word.pinyin = ''.join(pinyin)
            hanzi = byte2str(data[pos:pos+length*2])
            pos = pos + length*2
            word.value = hanzi.encode('utf-8')
        return word

    def get_dict_info(self, data):
        self.end_position = struct.unpack('I', data[self.offset:self.offset+4])[0]

    def load(self, filename):
        f = open(filename, 'rb')
        data = f.read()
        f.close()
        if data[0:8] != self.head:
            print "It's not a bdict file"
            sys.exit(1)
        return self.read(data)

    def read(self, data):
        self.get_dict_info(data)
        pos = self.dict_start
        while pos < self.end_position:
            # print(pos, self.end_position)
            # if pos >= self.end_position:
            #     break
            # this length counts characters; the actual byte length is
            # pinyin (length*2) plus characters (length*2)
            length, pure_english = self._get_word_len(data, pos=pos)
            pos += 4
            # print('*'*60)
            # print(repr(data[pos:pos+length*4]))
            if pure_english:
                word = self._get_word(data[pos:pos+length], length=length, pure_english=True)
                pos = pos + length
            else:
                word = self._get_word(data[pos:pos+length*4], length=length)
                pos = pos + length*4
            if word.value:
                if self.dictionary.has_key(word.pinyin):
                    self.dictionary[word.pinyin].append(word)
                else:
                    self.dictionary[word.pinyin] = []
                    self.dictionary[word.pinyin].append(word)
        return self.dictionary
```
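The script above does `from tools import *` and relies on `BaseDictFile`, `Word` and `byte2str`, which are not included in the article. Their real implementation isn't shown here; purely from how they are used, a minimal stand-in (names and behaviour inferred, not the original `tools` module) might look like this:

```python
# -*- coding: utf-8 -*-
# Hypothetical stand-in for the missing `tools` module, reconstructed only from
# how the bdict class above uses it.
import struct


class Word(object):
    """One dictionary entry: the text itself plus its pinyin."""
    def __init__(self):
        self.value = ''    # the word (hanzi, or letters for English entries)
        self.pinyin = ''   # the full pinyin string


class BaseDictFile(object):
    """Base class holding the pinyin -> [Word, ...] mapping filled by read()."""
    def __init__(self):
        self.dictionary = {}


def byte2str(data):
    """Decode UTF-16LE bytes (two bytes per character) into a unicode string."""
    ret = u''
    for i in xrange(0, len(data), 2):
        ret += unichr(struct.unpack('<H', data[i:i+2])[0])
    return ret
```

With something like this in place, parsing a downloaded file would be roughly `bdict().load('example.bdict')`, where `example.bdict` stands for whatever dictionary file you downloaded.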
QQ Pinyin IME dictionary parsing
Java version:
```java
/*  Copyright (c) 2010 Xiaoyun Zhu
 *
 *  Permission is hereby granted, free of charge, to any person obtaining a copy
 *  of this software and associated documentation files (the "Software"), to deal
 *  in the Software without restriction, including without limitation the rights
 *  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 *  copies of the Software, and to permit persons to whom the Software is
 *  furnished to do so, subject to the following conditions:
 *
 *  The above copyright notice and this permission notice shall be included in
 *  all copies or substantial portions of the Software.
 *
 *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 *  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 *  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 *  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 *  THE SOFTWARE.
 */
import java.io.ByteArrayOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.Channels;
import java.nio.channels.FileChannel;
import java.util.Arrays;
import java.util.zip.InflaterOutputStream;

/**
 * QQPinyinIME QPYD File Reader
 *
 * <pre>
 * QPYD Format overview:
 *
 * General Information:
 * - Chinese characters are all encoded with UTF-16LE.
 * - Pinyin are encoded in ascii (or UTF-8).
 * - Numbers are using little endian byte order.
 *
 * QPYD hex analysis:
 * - 0x00 QPYD file identifier
 * - 0x38 offset of compressed data (word-pinyin-dictionary)
 * - 0x44 total words in qpyd
 * - 0x60 start of header information
 *
 * Compressed data analysis:
 * - zip/standard (beginning with 0x789C) is used in (all analyzed) qpyd files
 * - data is divided in two parts
 * -- 1. offset and length information (16 bytes for each pinyin-word pair)
 *       0x06 offset points to first pinyin
 *       0x00 length of pinyin
 *       0x01 length of word
 * -- 2. actual data
 *       Dictionary data has the form ((pinyin)(word))* with no separators.
 *       Data can only be read using offset and length information.
 * </pre>
 */
public class QQPinyinQpydReader {

    public static void main(final String[] args) throws IOException {
        // download from http://dict.py.qq.com/list.php
        final String qqydFile = "D:\\test.qpyd";

        // read qpyd into byte array
        final ByteArrayOutputStream dataOut = new ByteArrayOutputStream();
        try (RandomAccessFile file = new RandomAccessFile(qqydFile, "r");
                final FileChannel fChannel = file.getChannel();) {
            fChannel.transferTo(0, fChannel.size(), Channels.newChannel(dataOut));
        }

        // qpyd as bytes
        final ByteBuffer dataRawBytes = ByteBuffer.wrap(dataOut.toByteArray());
        dataRawBytes.order(ByteOrder.LITTLE_ENDIAN);
        System.out.println("文件:" + qqydFile);

        // read info of compressed data
        final int startZippedDictAddr = dataRawBytes.getInt(0x38);
        final int zippedDictLength = dataRawBytes.limit() - startZippedDictAddr;

        // qpyd header as UTF-16LE string
        final String dataString = new String(Arrays.copyOfRange(dataRawBytes.array(), 0x60, startZippedDictAddr),
                "UTF-16LE");

        // print header
        System.out.println("名称:" + QQPinyinQpydReader.substringBetween(dataString, "Name:", "\r\n"));
        System.out.println("类型:" + QQPinyinQpydReader.substringBetween(dataString, "Type:", "\r\n"));
        System.out.println("子类型:" + QQPinyinQpydReader.substringBetween(dataString, "FirstType:", "\r\n"));
        System.out.println("词库说明:" + QQPinyinQpydReader.substringBetween(dataString, "Intro:", "\r\n"));
        System.out.println("词库样例:" + QQPinyinQpydReader.substringBetween(dataString, "Example:", "\r\n"));
        System.out.println("词条数:" + dataRawBytes.getInt(0x44));

        // read zipped qqyd dictionary into byte array
        dataOut.reset();
        try (InflaterOutputStream inflater = new InflaterOutputStream(dataOut);) {
            Channels.newChannel(inflater).write(
                    ByteBuffer.wrap(dataRawBytes.array(), startZippedDictAddr, zippedDictLength));
        }

        // uncompressed qqyd dictionary as bytes
        final ByteBuffer dataUnzippedBytes = ByteBuffer.wrap(dataOut.toByteArray());
        dataUnzippedBytes.order(ByteOrder.LITTLE_ENDIAN);

        // for debugging: save unzipped data to *.unzipped file
        try (FileOutputStream out = new FileOutputStream(qqydFile + ".unzipped");) {
            Channels.newChannel(out).write(dataUnzippedBytes);
            System.out.println("压缩数据:0x" + Integer.toHexString(startZippedDictAddr) + "(解压前:"
                    + zippedDictLength + "B, 解压后:" + dataUnzippedBytes.limit() + "B)");
        }

        // stores the start address of actual dictionary data
        int unzippedDictStartAddr = -1;
        final byte[] byteArray = dataUnzippedBytes.array();
        dataUnzippedBytes.position(0);
        while ((unzippedDictStartAddr == -1) || (dataUnzippedBytes.position() < unzippedDictStartAddr)) {
            // read word
            final int pinyinLength = dataUnzippedBytes.get() & 0xff;
            final int wordLength = dataUnzippedBytes.get() & 0xff;
            dataUnzippedBytes.getInt(); // garbage
            final int pinyinStartAddr = dataUnzippedBytes.getInt();
            final int wordStartAddr = pinyinStartAddr + pinyinLength;

            if (unzippedDictStartAddr == -1) {
                unzippedDictStartAddr = pinyinStartAddr;
                System.out.println("词库地址(解压后):0x" + Integer.toHexString(unzippedDictStartAddr) + "\n");
            }

            final String pinyin = new String(
                    Arrays.copyOfRange(byteArray, pinyinStartAddr, pinyinStartAddr + pinyinLength), "UTF-8");
            final String word = new String(
                    Arrays.copyOfRange(byteArray, wordStartAddr, wordStartAddr + wordLength), "UTF-16LE");
            System.out.println(word + "\t" + pinyin);
        }
    }

    public static final String substringBetween(final String text, final String start, final String end) {
        final int nStart = text.indexOf(start);
        final int nEnd = text.indexOf(end, nStart + 1);
        if ((nStart != -1) && (nEnd != -1)) {
            return text.substring(nStart + start.length(), nEnd);
        } else {
            return null;
        }
    }
}
```
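For readers who prefer to stay in Python, the format notes in the Java comments above (compressed-data offset at 0x38, entry count at 0x44, a UTF-16LE header from 0x60, a plain zlib stream starting with 0x789C, and index records of pinyin length / word length / unused bytes / pinyin offset) are enough to write an equivalent reader. The following is only a rough sketch that mirrors the Java loop field for field; the function name, file path and variable names are illustrative, not from the original article.

```python
# -*- coding: utf-8 -*-
# Rough Python counterpart of the Java .qpyd reader above, using the offsets
# documented in its header comment.
import struct
import zlib


def read_qpyd(path):  # `read_qpyd` and `path` are illustrative names
    with open(path, 'rb') as f:
        data = f.read()

    start_zipped = struct.unpack_from('<I', data, 0x38)[0]  # offset of compressed dictionary
    word_count = struct.unpack_from('<I', data, 0x44)[0]    # total number of entries
    header = data[0x60:start_zipped].decode('utf-16-le', 'ignore')  # Name:/Type:/... text

    unzipped = zlib.decompress(data[start_zipped:])          # plain zlib stream (starts 0x78 0x9C)

    entries = []
    pos = 0
    dict_start = None
    # index records, read exactly as the Java loop does:
    # 1 byte pinyin length, 1 byte word length, 4 unused bytes, 4 bytes pinyin offset
    while dict_start is None or pos < dict_start:
        py_len, w_len, _unused, py_off = struct.unpack_from('<BBII', unzipped, pos)
        pos += struct.calcsize('<BBII')
        if dict_start is None:
            dict_start = py_off  # the first pinyin offset marks where the index ends
        pinyin = unzipped[py_off:py_off + py_len].decode('utf-8', 'ignore')
        word = unzipped[py_off + py_len:py_off + py_len + w_len].decode('utf-16-le', 'ignore')
        entries.append((pinyin, word))

    return header, word_count, entries


if __name__ == '__main__':
    header, count, entries = read_qpyd('test.qpyd')  # file name is an example
    for pinyin, word in entries:
        print(word + '\t' + pinyin)
```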
Download links for common IME dictionaries
- Sogou dictionaries: http://pinyin.sogou.com/dict/
- Baidu dictionaries: https://shurufa.baidu.com/dict
- QQ (Tencent) dictionaries: http://dict.qq.pinyin.cn/
- Batch-download spider: https://github.com/ltf/lab/tree/master/plab/thesaurus/spider