import tornado.web
import pandas as pd
import json
import numpy as np
import datetime
import traceback
import hashlib
import ast
import re

import jieba
import jieba.analyse
import requests
import docx
from operator import itemgetter

from config import *  # expected to provide engine, topK and distance_threshold

# Stop words: tokens dropped after segmentation.
jieba.analyse.set_stop_words('./simhash_model/doc/etl_stopword.txt')
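# A minimal sketch of what `config` is assumed to export, inferred only from
# the names used below (engine, topK, distance_threshold); the connection
# string and the values are illustrative assumptions, not the original code:
#
#   # config.py
#   from sqlalchemy import create_engine
#   engine = create_engine("mysql+pymysql://user:pwd@host:3306/dbname")
#   topK = 30                 # number of weighted keywords kept per script
#   distance_threshold = 3    # max Hamming distance treated as "similar"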
class OnlineSimHashSimilarity(tornado.web.RequestHandler):
    def initialize(self, pre_remove_words, logger):
        self.pre_remove_words = pre_remove_words
        self.logger = logger

    def post(self, script_md5):
        res = {
            'script_md5': script_md5,
            'similarity_script': None
        }
        self.logger.info("************************** NEW REQUEST **************************")
        self.logger.info("the md5 of the requested script is %s", script_md5)
        try:
            inst = ParseContentAndGetsimhash(script_md5, self.pre_remove_words)
            # Extract the dialogue lines.
            inst.parse_content()
            self.logger.info("the parsed content is %s", str(inst.content))
            # Compute the simhash fingerprint.
            inst.get_simhash()
            self.logger.info("the simhash is %s", str(inst.finger_print))
            self.logger.info("the keywords and weights are %s", str(inst.keyword_weight))
            # Persist the fingerprint.
            inst.write_db()
            self.logger.info("write to database has completed!")
            # Compute the Hamming distance to every script already in the
            # database and keep the highly similar ones.
            simi_inst = GetSimilarityScripts(inst.file_name, inst.script_md5, inst.finger_print)
            if simi_inst.similarity_scripts:
                res['similarity_script'] = simi_inst.similarity_scripts
                self.logger.info("there are %d similar scripts: %s!",
                                 len(ast.literal_eval(simi_inst.similarity_scripts)),
                                 simi_inst.similarity_scripts)
                # Persist the similarity records.
                simi_inst.write_db()
                self.logger.info("SimilarityScripts write to database has completed!")
            else:
                self.logger.info("no similar script!")
        except Exception:
            self.logger.error(traceback.format_exc())
        # Return the result to the caller.
        self.write(json.dumps(res))
        self.flush()
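# A minimal wiring sketch (assumed, not from the original): the URL pattern,
# port and logger name are illustrative. The route's capture group is what
# Tornado passes to post() as script_md5.
def _make_demo_app():
    import logging
    return tornado.web.Application([
        (r"/simhash/([0-9a-f]{32})", OnlineSimHashSimilarity,
         dict(pre_remove_words=[], logger=logging.getLogger("simhash"))),
    ])
# e.g.: _make_demo_app().listen(8888); tornado.ioloop.IOLoop.current().start()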
class ParseContentAndGetsimhash(object):
    """
    1. Look up the document's download URL in the database by script_md5 and download it;
    2. Parse the dialogue lines out of the document;
    3. Compute the simhash of the dialogue.
    """
    def __init__(self, script_md5, pre_remove_words):
        self.script_md5 = script_md5
        self.pre_remove_words = pre_remove_words
        self.file_name = None
        self.content = None
        self.finger_print = None
        self.keyword_weight = None

    def parse_content(self):
        # Download the file locally via its URL.
        # NOTE: interpolating into SQL is injection-prone; a parameterized
        # query would be safer here.
        sql = """select * from ctop_script_file where id = '%s' """ % self.script_md5
        df = pd.read_sql(sql, engine)
        download_url = df['download_url'].values[0]
        self.file_name = df['file_name'].values[0]
        r = requests.get(download_url)
        with open('./simhash_model/file_cache/%s' % self.file_name, 'wb') as f:
            f.write(r.content)
        # Parse the local file and extract the dialogue.
        # TODO: split each paragraph at its first colon; if the part before the
        # colon contains a keyword such as '录屏' (screen recording), '场景'
        # (scene) or '演员' (actor), do not keep the part after the colon.
        # A sketch of this rule follows the class.
        doc = docx.Document('./simhash_model/file_cache/%s' % self.file_name)
        script_list = [para.text for para in doc.paragraphs]
        self.content = ''.join(script_list)
        # Preprocess the dialogue: strip product names, character names, etc.
        for word in self.pre_remove_words:
            self.content = re.sub(word, " ", self.content)

    def get_simhash(self):
        sim = SimHash(self.content)
        self.finger_print = sim.finger_print
        self.keyword_weight = sim.keyword_weight

    def write_db(self):
        res_dict = {
            "id": self.script_md5,
            "file_name": self.file_name,
            "finger_print": self.finger_print,
            "create_time": datetime.datetime.now()
        }
        res_df = pd.DataFrame.from_dict(res_dict, orient='index').T
        res_df.to_sql(name='ctop_script_fingerprint_v3', con=engine, if_exists='append', index=False)
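# A minimal sketch (hypothetical helper, not part of the original) of the TODO
# rule above: split each paragraph at its first colon, and drop the text after
# it when the prefix is a stage direction such as 录屏 / 场景 / 演员.
def _extract_dialogue(paragraphs, skip_keywords=('录屏', '场景', '演员')):
    lines = []
    for para in paragraphs:
        # Try the full-width colon first, then the ASCII one.
        head, sep, tail = para.partition('：')
        if not sep:
            head, sep, tail = para.partition(':')
        if not sep:
            lines.append(para)       # no colon at all: keep the paragraph as-is
        elif not any(key in head for key in skip_keywords):
            lines.append(tail)       # speaker prefix: keep only the spoken line
    return ''.join(lines)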
class SimHash(object):
    def __init__(self, content):
        self.content = content
        self.finger_print = None
        self.keyword_weight = None
        self.sim_hash()

    def sim_hash(self):
        """
        Compute the document's simhash fingerprint.
        :return: a 64-bit string of 0s and 1s
        """
        seg_list = jieba.cut(self.content, cut_all=False)  # accurate mode
        # Extract at least the top 30 weighted keywords so the fingerprint has
        # enough signal even when topK is small; they are re-sorted below.
        keyword_weight = jieba.analyse.extract_tags("|".join(seg_list), max(topK, 30), withWeight=True)
        # No key information: return directly and leave finger_print as None.
        if len(keyword_weight) == 0:
            return
        # Sort by weight, then by keyword.
        sorted_keyword_weight = sorted(keyword_weight, key=itemgetter(1, 0), reverse=True)[:topK]
        self.keyword_weight = sorted_keyword_weight
        weight_hash_list = []
        for keyword, weight in sorted_keyword_weight:
            # Quantize the TF-IDF weight to an integer. Only the sign of the
            # per-bit sums matters for the fingerprint, so this mostly trades
            # precision for speed; weights below 0.1 quantize to 0 and drop out.
            weight = int(10 * weight)
            # Hash the keyword to 64 bits.
            str_hash = self.build_in_hash(keyword)
            weight_hash = [weight if b == '1' else -weight for b in str_hash]
            weight_hash_list.append(weight_hash)
        weight_sum = np.sum(np.array(weight_hash_list), axis=0)
        self.finger_print = ''.join(['1' if i > 0 else '0' for i in weight_sum])

    def build_in_hash(self, keyword):
        """
        Hash a keyword with hashlib.md5 (the whole word is hashed at once,
        not character by character).
        :return: a 64-bit binary string
        """
        truncate_mask = 2 ** 64 - 1
        bitstring_format = '0{}b'.format(64)
        h = int(hashlib.md5(keyword.encode('utf-8')).hexdigest(), 16)  # hex to int
        h_bits = format(h & truncate_mask, bitstring_format)  # truncate to 64 bits
        return h_bits
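# A minimal usage sketch (the sample texts are invented): near-duplicate texts
# should yield fingerprints that differ in only a few of the 64 bit positions.
def _demo_simhash():
    a = SimHash("今天天气不错,我们去公园散步,顺便买点水果。")
    b = SimHash("今天天气不错,我们去公园散步,顺便买点蔬菜。")
    if a.finger_print and b.finger_print:
        diff = sum(x != y for x, y in zip(a.finger_print, b.finger_print))
        print("differing bits:", diff)  # expected to be small for similar texts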
class GetSimilarityScripts(object):
    """
    Fetch every simhash in the database, compute the Hamming distance to each,
    and keep the scripts whose distance is within the threshold.
    If highly similar documents exist, write the details to the database.
    """
    def __init__(self, file_name, script_md5, finger_print):
        self.file_name = file_name
        self.script_md5 = script_md5
        self.finger_print = finger_print
        self.similarity_scripts = ""
        self.get_similarity_scripts()

    def get_similarity_scripts(self):
        sql = """select id, file_name, finger_print from ctop_script_fingerprint_v3"""
        df = pd.read_sql(sql, engine)
        df['distance'] = df['finger_print'].apply(self.hamming_dis)
        # Keep scripts within the threshold, excluding the script itself
        # (its own fingerprint was just written and has distance 0).
        sim_df = df[(df['distance'] <= distance_threshold) & (df['id'] != self.script_md5)]
        sim_df = sim_df.reset_index(drop=True)
        # Stored as a stringified list of 3-tuples, e.g.
        # "[('name1', 'md5', 1), ('name2', 'md5', 4), ('name3', 'md5', 9)]"
        if len(sim_df) > 0:
            sim_list = []
            for i in range(len(sim_df)):
                sim_list.append((sim_df.loc[i, 'file_name'],
                                 sim_df.loc[i, 'id'],
                                 int(sim_df.loc[i, 'distance'])))  # plain int keeps the string literal_eval-able
            self.similarity_scripts = str(sim_list)

    def hamming_dis(self, another_finger_print):
        # If either fingerprint is missing, the distance is undefined.
        if not self.finger_print or not another_finger_print:
            return np.nan
        n = int(self.finger_print, 2) ^ int(another_finger_print, 2)
        # Kernighan's trick: each n &= n - 1 clears the lowest set bit.
        cnt = 0
        while n:
            n &= (n - 1)
            cnt += 1
        return cnt

    def write_db(self):
        res_dict = {
            "id": self.script_md5,
            "file_name": self.file_name,
            "similarity": self.similarity_scripts,
            "create_time": datetime.datetime.now()
        }
        res_df = pd.DataFrame.from_dict(res_dict, orient='index').T
        res_df.to_sql(name='ctop_script_similarity', con=engine, if_exists='append', index=False)
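# A minimal sketch of the Hamming-distance step in isolation (the sample
# fingerprints are made up): XOR marks the differing bits, and Kernighan's
# n &= n - 1 loop counts them, one iteration per set bit.
def _demo_hamming():
    fp_a = '1011' + '0' * 60
    fp_b = '1001' + '0' * 60
    n = int(fp_a, 2) ^ int(fp_b, 2)
    cnt = 0
    while n:
        n &= n - 1
        cnt += 1
    print(cnt)  # 1: the fingerprints differ in a single bit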