@@ -0,0 +1,41 @@
+import pandas as pd
+from gensim.models import KeyedVectors, Word2Vec
+from loguru import logger
+
+from config.url_and_db import ai_word_product_engine
+
+logger.remove()  # drop the handler that loguru installs on import; keeping it would duplicate every log line
+logger.add("/data/pythonProject/video_to_word/logs/train_word2vec_model.{time:YYYY-MM-DD}.log",
+           rotation="00:00",  # start a new log file daily at midnight
+           format="{time:YYYY-MM-DD HH:mm:ss,SSS} [{process}] [{thread}] {level} {file} {line} - {message}",
+           level="INFO")
+
+
+@logger.catch()  # log any uncaught exception from this function with a full traceback
+def train_word2vec_model():
|
|
|
+ # 1 get corpus
|
|
|
+ sql = f"select word_split from tb_asr_result where word_split is not null"
|
|
|
+ doc_df = pd.read_sql(sql, ai_word_product_engine)
|
|
|
+ doc_df['word_split_lst'] = doc_df['word_split'].apply(lambda x: [word for word in x.split(' ')])
|
|
|
+ logger.info(f"获取到 {doc_df['word_split_lst'].shape[0]} 个脚本文件.")
|
|
|
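+    # Note: split(' ') keeps empty tokens if the text ever contains consecutive
+    # spaces; str.split() with no argument would collapse arbitrary whitespace.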
+
+    # 2 train the gensim word2vec model with our own corpus
+ model = Word2Vec(doc_df['word_split_lst'].values, min_count=5, vector_size=50, workers=3, window=3, sg=1)
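+    # Hyperparameter notes (a reading of the values above, not tuning advice):
+    # sg=1 selects the skip-gram architecture, vector_size=50 sets the embedding
+    # dimensionality, window=3 uses a small context window, min_count=5 drops
+    # words seen fewer than five times, and workers=3 trains on three threads.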
+
+ # 3 store just the words and their trained embeddings
+ word_vectors = model.wv
+    # The scheduled job runs daily, so we simply overwrite the previous model file.
+ word_vectors.save("/data/pythonProject/video_to_word/models/word2vec.wordvectors")
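+    # Saving only model.wv keeps the words and their embeddings but discards the
+    # training state, so the artifact is smaller; the full model would be needed
+    # to resume training later.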
+    logger.info("word2vec model training finished!")
+
+
+if __name__ == '__main__':
+ # 0 train model
+ train_word2vec_model()
+
+    # 1 load the saved vectors back with memory-mapping (read-only)
+ wv = KeyedVectors.load("/data/pythonProject/video_to_word/models/word2vec.wordvectors", mmap='r')
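+    # mmap='r' memory-maps the saved vector arrays read-only, so the OS can share
+    # a single copy of the matrix across processes instead of loading it per process.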
+
+ # 2 get numpy vector of a word (for test)
+ logger.info(f"the vector of 红包 is {wv['红包']}")
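+    # A further sanity check one might add (a sketch, not part of the original job):
+    # nearest neighbours are a quicker read on embedding quality than a raw vector,
+    # and the membership guard avoids a KeyError for out-of-vocabulary words.
+    if '红包' in wv.key_to_index:
+        logger.info(f"words most similar to 红包: {wv.most_similar('红包', topn=5)}")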