First, download the Chinese Wikipedia corpus (about 1.7 GB):
https://dumps.wikimedia.org/zhwiki/latest/zhwiki-latest-pages-articles.xml.bz2
The downloaded file is named "zhwiki-latest-pages-articles.xml.bz2".
Everyone knows this link, but it's of little use here: the Great Firewall is too strong, the download never completes, and another route is needed...

After much searching, I found two open-source corpus collections:

https://github.com/brightmart/nlp_chinese_corpus

https://github.com/crownpku/Awesome-Chinese-NLP  (this one has rich corpus resources)

Addresses for the Chinese Wikipedia corpus:

https://dumps.wikimedia.org/zhwiki/latest/

https://dumps.wikimedia.org/zhwiki/

These contain the Wikipedia corpus; hopefully the projects keep growing and their content keeps getting richer.

As research goes deeper, it becomes apparent that Chinese-language corpus accumulation still falls well short.

All of that turned out to be a runaround: half a day of attempts and not one download succeeded. I finally switched to the Xunlei (Thunder) downloader, and the download went through.

http://academictorrents.com/

A directory of corpus download addresses.
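If the direct HTTP link is reachable from your network (a big assumption, per the above), a short script can stream the dump straight to disk instead of a browser download. This is only a sketch using the official dump URL above:

# Streaming-download sketch: writes 1 MB chunks so the ~1.7 GB file
# never has to fit in memory. Assumes the URL is reachable.
import requests

url = 'https://dumps.wikimedia.org/zhwiki/latest/zhwiki-latest-pages-articles.xml.bz2'
with requests.get(url, stream=True, timeout=60) as r:
    r.raise_for_status()
    with open('zhwiki-latest-pages-articles.xml.bz2', 'wb') as f:
        for chunk in r.iter_content(chunk_size=1 << 20):
            f.write(chunk)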

Text preprocessing: data_pre_process.py

# -*- coding: utf-8 -*-
from gensim.corpora import WikiCorpus
import jieba
from langconv import *


def my_function():
    space = ' '
    i = 0
    l = []
    zhwiki_name = './data/zhwiki-latest-pages-articles.xml.bz2'
    f = open('./data/reduce_zhiwiki.txt', 'w')
    wiki = WikiCorpus(zhwiki_name, lemmatize=False, dictionary={})
    for text in wiki.get_texts():
        for temp_sentence in text:
            temp_sentence = Converter('zh-hans').convert(temp_sentence)
            seg_list = list(jieba.cut(temp_sentence))
            for temp_term in seg_list:
                l.append(temp_term)
        f.write(space.join(l) + '\n')
        l = []
        i = i + 1
        if (i % 200 == 0):
            print('Saved ' + str(i) + ' articles')
    f.close()


if __name__ == '__main__':
    my_function()
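A quick sanity check of the preprocessing output (a snippet of my own, assuming the paths above and that the file came out as UTF-8): the first line should be space-separated, simplified-Chinese tokens, one article per line.

# Print the first 200 characters of the preprocessed corpus to verify
# it is simplified, space-separated text (one article per line).
with open('./data/reduce_zhiwiki.txt', encoding='utf8') as f:
    print(f.readline()[:200])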

word2vec training code

# -*- coding: utf-8 -*-
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
import logging

logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)


def my_function():
    wiki_news = open('D:\\yangyang\\wiki\\wikipre\\reduce_zhiwiki.txt', 'r', encoding='utf8')
    # sg=0 trains a CBOW model; sg=1 trains a skip-gram model
    # size is the dimensionality of the word vectors
    # window is the maximum distance between the current and the predicted word
    # min_count drops words that occur fewer than this many times
    # workers is the number of threads used for training
    model = Word2Vec(LineSentence(wiki_news), sg=0, size=192, window=5, min_count=5, workers=9)
    model.save('zhiwiki_news.word2vec')


if __name__ == '__main__':
    my_function()

# 2019/6/17 19:51
# 2019-06-18 11:15:10,509 : INFO : EPOCH 1 - PROGRESS: at 0.70% examples, 65 words/s, in_qsize 18, out_qsize 0
# Training is far too slow: less than 1% after 24 hours, so a single machine would need about 100 days. A server is required.
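At 65 words/s, one likely culprit (my guess, not verified on the original machine) is that gensim fell back to its slow pure-Python training loop because the compiled C routines failed to load. A quick check:

# If FAST_VERSION is -1, gensim's optimized C training code is not
# available and word2vec runs orders of magnitude slower; reinstalling
# gensim from a prebuilt wheel usually fixes it.
from gensim.models import word2vec

print(word2vec.FAST_VERSION)  # >= 0 means the fast compiled path is active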

test

# coding=utf-8
import gensim


def my_function():
    model = gensim.models.Word2Vec.load('./data/zhiwiki_news.word2vec')
    print(model.similarity('西红柿', '番茄'))  # similarity is 0.63
    print(model.similarity('西红柿', '香蕉'))  # similarity is 0.44
    word = '中国'
    if word in model.wv.index2word:
        print(model.most_similar(word))


if __name__ == '__main__':
    my_function()
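On newer gensim releases (an assumption about your install; this post ran an older 3.x version), similarity queries have moved onto the model's wv attribute, roughly like this:

# Variant for newer gensim (late 3.x / 4.x): query the KeyedVectors
# object instead of the model; in 4.x, vocabulary lookup uses
# model.wv.key_to_index rather than index2word.
import gensim

model = gensim.models.Word2Vec.load('./data/zhiwiki_news.word2vec')
print(model.wv.similarity('西红柿', '番茄'))
print(model.wv.most_similar('中国'))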

Converting Traditional Chinese to Simplified Chinese (langconv.py)

#!/usr/bin/env python
# -*- coding: utf-8 -*-

from copy import deepcopy

try:
    import psyco
    psyco.full()
except:
    pass

try:
    from zh_wiki import zh2Hant, zh2Hans
except ImportError:
    from zhtools.zh_wiki import zh2Hant, zh2Hans

import sys
py3k = sys.version_info >= (3, 0, 0)

if py3k:
    UEMPTY = ''
else:
    _zh2Hant, _zh2Hans = {}, {}
    for old, new in ((zh2Hant, _zh2Hant), (zh2Hans, _zh2Hans)):
        for k, v in old.items():
            new[k.decode('utf8')] = v.decode('utf8')
    zh2Hant = _zh2Hant
    zh2Hans = _zh2Hans
    UEMPTY = ''.decode('utf8')

# states
(START, END, FAIL, WAIT_TAIL) = list(range(4))
# conditions
(TAIL, ERROR, MATCHED_SWITCH, UNMATCHED_SWITCH, CONNECTOR) = list(range(5))

MAPS = {}


class Node(object):
    def __init__(self, from_word, to_word=None, is_tail=True,
                 have_child=False):
        self.from_word = from_word
        if to_word is None:
            self.to_word = from_word
            self.data = (is_tail, have_child, from_word)
            self.is_original = True
        else:
            self.to_word = to_word or from_word
            self.data = (is_tail, have_child, to_word)
            self.is_original = False
        self.is_tail = is_tail
        self.have_child = have_child

    def is_original_long_word(self):
        return self.is_original and len(self.from_word) > 1

    def is_follow(self, chars):
        return chars != self.from_word[:-1]

    def __str__(self):
        return '<Node, %s, %s, %s, %s>' % (repr(self.from_word),
                repr(self.to_word), self.is_tail, self.have_child)

    __repr__ = __str__


class ConvertMap(object):
    def __init__(self, name, mapping=None):
        self.name = name
        self._map = {}
        if mapping:
            self.set_convert_map(mapping)

    def set_convert_map(self, mapping):
        convert_map = {}
        have_child = {}
        max_key_length = 0
        for key in sorted(mapping.keys()):
            if len(key) > 1:
                for i in range(1, len(key)):
                    parent_key = key[:i]
                    have_child[parent_key] = True
            have_child[key] = False
            max_key_length = max(max_key_length, len(key))
        for key in sorted(have_child.keys()):
            convert_map[key] = (key in mapping, have_child[key],
                    mapping.get(key, UEMPTY))
        self._map = convert_map
        self.max_key_length = max_key_length

    def __getitem__(self, k):
        try:
            is_tail, have_child, to_word = self._map[k]
            return Node(k, to_word, is_tail, have_child)
        except:
            return Node(k)

    def __contains__(self, k):
        return k in self._map

    def __len__(self):
        return len(self._map)


class StatesMachineException(Exception): pass


class StatesMachine(object):
    def __init__(self):
        self.state = START
        self.final = UEMPTY
        self.len = 0
        self.pool = UEMPTY

    def clone(self, pool):
        new = deepcopy(self)
        new.state = WAIT_TAIL
        new.pool = pool
        return new

    def feed(self, char, map):
        node = map[self.pool + char]

        if node.have_child:
            if node.is_tail:
                if node.is_original:
                    cond = UNMATCHED_SWITCH
                else:
                    cond = MATCHED_SWITCH
            else:
                cond = CONNECTOR
        else:
            if node.is_tail:
                cond = TAIL
            else:
                cond = ERROR

        new = None
        if cond == ERROR:
            self.state = FAIL
        elif cond == TAIL:
            if self.state == WAIT_TAIL and node.is_original_long_word():
                self.state = FAIL
            else:
                self.final += node.to_word
                self.len += 1
                self.pool = UEMPTY
                self.state = END
        elif self.state == START or self.state == WAIT_TAIL:
            if cond == MATCHED_SWITCH:
                new = self.clone(node.from_word)
                self.final += node.to_word
                self.len += 1
                self.state = END
                self.pool = UEMPTY
            elif cond == UNMATCHED_SWITCH or cond == CONNECTOR:
                if self.state == START:
                    new = self.clone(node.from_word)
                    self.final += node.to_word
                    self.len += 1
                    self.state = END
                else:
                    if node.is_follow(self.pool):
                        self.state = FAIL
                    else:
                        self.pool = node.from_word
        elif self.state == END:
            # END is a new START
            self.state = START
            new = self.feed(char, map)
        elif self.state == FAIL:
            raise StatesMachineException('Translate States Machine '
                    'have error with input data %s' % node)
        return new

    def __len__(self):
        return self.len + 1

    def __str__(self):
        return '<StatesMachine %s, pool: "%s", state: %s, final: %s>' % (
                id(self), self.pool, self.state, self.final)

    __repr__ = __str__


class Converter(object):
    def __init__(self, to_encoding):
        self.to_encoding = to_encoding
        self.map = MAPS[to_encoding]
        self.start()

    def feed(self, char):
        branches = []
        for fsm in self.machines:
            new = fsm.feed(char, self.map)
            if new:
                branches.append(new)
        if branches:
            self.machines.extend(branches)
        self.machines = [fsm for fsm in self.machines if fsm.state != FAIL]
        all_ok = True
        for fsm in self.machines:
            if fsm.state != END:
                all_ok = False
        if all_ok:
            self._clean()
        return self.get_result()

    def _clean(self):
        if len(self.machines):
            self.machines.sort(key=lambda x: len(x))
            # self.machines.sort(cmp=lambda x,y: cmp(len(x), len(y)))
            self.final += self.machines[0].final
        self.machines = [StatesMachine()]

    def start(self):
        self.machines = [StatesMachine()]
        self.final = UEMPTY

    def end(self):
        self.machines = [fsm for fsm in self.machines
                         if fsm.state == FAIL or fsm.state == END]
        self._clean()

    def convert(self, string):
        self.start()
        for char in string:
            self.feed(char)
        self.end()
        return self.get_result()

    def get_result(self):
        return self.final


def registery(name, mapping):
    global MAPS
    MAPS[name] = ConvertMap(name, mapping)

registery('zh-hant', zh2Hant)
registery('zh-hans', zh2Hans)
del zh2Hant, zh2Hans


def run():
    import sys
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('-e', type='string', dest='encoding',
                      help='encoding')
    parser.add_option('-f', type='string', dest='file_in',
                      help='input file (- for stdin)')
    parser.add_option('-t', type='string', dest='file_out',
                      help='output file')
    (options, args) = parser.parse_args()
    if not options.encoding:
        parser.error('encoding must be set')
    if options.file_in:
        if options.file_in == '-':
            file_in = sys.stdin
        else:
            file_in = open(options.file_in)
    else:
        file_in = sys.stdin
    if options.file_out:
        if options.file_out == '-':
            file_out = sys.stdout
        else:
            file_out = open(options.file_out, 'wb')
    else:
        file_out = sys.stdout

    c = Converter(options.encoding)
    for line in file_in:
        file_out.write(c.convert(line.rstrip('\n')))


if __name__ == '__main__':
    run()
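The converter above can also be used on its own. A quick usage sketch (the example string is my own; it assumes langconv.py and its zh_wiki mapping tables are on the import path):

# 'zh-hans' converts to Simplified Chinese, 'zh-hant' to Traditional.
from langconv import Converter

traditional = '憂鬱的臺灣烏龜'  # sample Traditional text
print(Converter('zh-hans').convert(traditional))  # expected: 忧郁的台湾乌龟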

Computing similarity

# -*- coding: utf-8 -*-
import codecs
import numpy
import gensim
import numpy as np
from keyword_extract import *

wordvec_size = 192


def get_char_pos(string, char):
    chPos = []
    try:
        chPos = list(((pos) for pos, val in enumerate(string) if (val == char)))
    except:
        pass
    return chPos


def word2vec(file_name, model):
    with codecs.open(file_name, 'r') as f:
        word_vec_all = numpy.zeros(wordvec_size)
        for data in f:
            space_pos = get_char_pos(data, ' ')
            first_word = data[0:space_pos[0]]
            if model.__contains__(first_word):
                word_vec_all = word_vec_all + model[first_word]
            for i in range(len(space_pos) - 1):
                word = data[space_pos[i]:space_pos[i + 1]]
                if model.__contains__(word):
                    word_vec_all = word_vec_all + model[word]
        return word_vec_all


# compute the cosine similarity of two vectors
def simlarityCalu(vector1, vector2):
    vector1Mod = np.sqrt(vector1.dot(vector1))
    vector2Mod = np.sqrt(vector2.dot(vector2))
    if vector2Mod != 0 and vector1Mod != 0:
        simlarity = (vector1.dot(vector2)) / (vector1Mod * vector2Mod)
    else:
        simlarity = 0
    return simlarity


if __name__ == '__main__':
    model = gensim.models.Word2Vec.load('data/zhiwiki_news.word2vec')
    p1 = './data/P1.txt'
    p2 = './data/P2.txt'
    p1_keywords = './data/P1_keywords.txt'
    p2_keywords = './data/P2_keywords.txt'
    getKeywords(p1, p1_keywords)
    getKeywords(p2, p2_keywords)
    p1_vec = word2vec(p1_keywords, model)
    p2_vec = word2vec(p2_keywords, model)
    print(simlarityCalu(p1_vec, p2_vec))
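To make the cosine formula concrete: cos(a, b) = a·b / (|a||b|), so parallel vectors score 1.0 and orthogonal vectors score 0.0. A tiny check of simlarityCalu with made-up vectors (run with the script above in scope; the values are pure arithmetic, independent of any model):

# Toy cosine-similarity check: parallel -> 1.0, orthogonal -> 0.0.
import numpy as np

a = np.array([1.0, 1.0])
b = np.array([2.0, 2.0])   # same direction as a
c = np.array([1.0, -1.0])  # orthogonal to a

print(simlarityCalu(a, b))  # ~1.0 (up to float rounding)
print(simlarityCalu(a, c))  # 0.0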

doc2vec

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import gensim.models as g
from gensim.corpora import WikiCorpus
import logging
from langconv import *

# enable logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

docvec_size = 192


class TaggedWikiDocument(object):
    def __init__(self, wiki):
        self.wiki = wiki
        self.wiki.metadata = True

    def __iter__(self):
        import jieba
        for content, (page_id, title) in self.wiki.get_texts():
            yield g.doc2vec.LabeledSentence(
                words=[w for c in content
                       for w in jieba.cut(Converter('zh-hans').convert(c))],
                tags=[title])


def my_function():
    zhwiki_name = './data/zhwiki-latest-pages-articles.xml.bz2'
    wiki = WikiCorpus(zhwiki_name, lemmatize=False, dictionary={})
    documents = TaggedWikiDocument(wiki)
    model = g.Doc2Vec(documents, dm=0, dbow_words=1, size=docvec_size,
                      window=8, min_count=19, iter=5, workers=8)
    model.save('data/zhiwiki_news.doc2vec')


if __name__ == '__main__':
    my_function()
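Once trained, the doc2vec model can be queried roughly like this (a sketch assuming the save path above and gensim 3.x, where document vectors live under model.docvecs; infer_vector expects a pre-segmented token list):

import jieba
import gensim.models as g

model = g.Doc2Vec.load('data/zhiwiki_news.doc2vec')
tokens = list(jieba.cut('北京是中华人民共和国的首都'))  # sample sentence of mine
vec = model.infer_vector(tokens)            # 192-dim document vector
print(model.docvecs.most_similar([vec]))    # nearest Wikipedia article titles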
