get_word2vec_model.py

import sys
import os
import pandas as pd
from gensim.models import KeyedVectors
from gensim.models import Word2Vec
from loguru import logger

curr_path = os.path.abspath(os.path.dirname(__file__))
project_root_path = curr_path[:curr_path.find("video_to_word") + len("video_to_word")]
sys.path.append(project_root_path)
from config.url_and_db import ai_word_product_engine

# Remove the handler that loguru creates automatically on import;
# without this, every message would be logged twice.
logger.remove()
logger.add("/data/pythonProject/video_to_word/logs/train_word2vec_model.{time:YYYY-MM-DD}.log",
           rotation="00:00",
           format="{time:YYYY-MM-DD HH:mm:ss,SSS} [{process}] [{thread}] {level} {file} {line} - {message}",
           level="INFO")
@logger.catch()
def train_word2vec_model():
    # 1 get the corpus
    sql = "select word_split from tb_asr_result where word_split is not null"
    doc_df = pd.read_sql(sql, ai_word_product_engine)
    doc_df['word_split_lst'] = doc_df['word_split'].apply(lambda x: x.split(' '))
    logger.info(f"Fetched {doc_df['word_split_lst'].shape[0]} script documents.")

    # 2 train the gensim word2vec model on our own corpus (sg=1 selects skip-gram)
    model = Word2Vec(doc_df['word_split_lst'].values, min_count=5, vector_size=50, workers=3, window=3, sg=1)

    # 3 store just the words and their trained embeddings
    word_vectors = model.wv
    # The scheduled job runs daily and simply overwrites the previous model file.
    word_vectors.save("/data/pythonProject/video_to_word/models/word2vec.wordvectors")
    logger.info("word2vec model training finished!")
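
# The daily refresh mentioned above is assumed to be driven by an external
# scheduler; a hypothetical crontab entry (the 02:00 schedule and interpreter
# path are assumptions, not from this script) might look like:
# 0 2 * * * /usr/bin/python3 /data/pythonProject/video_to_word/get_word2vec_model.py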

if __name__ == '__main__':
    # 0 train the model
    train_word2vec_model()
    # 1 load it back with memory-mapping = read-only, shared across processes
    wv = KeyedVectors.load("/data/pythonProject/video_to_word/models/word2vec.wordvectors", mmap='r')
    # 2 get the numpy vector of a word (as a sanity check)
    logger.info(f"the vector of 红包 is {wv['红包']}")
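    # A minimal sketch of a further sanity check, assuming 红包 survived the
    # min_count=5 cutoff; most_similar() is the standard gensim KeyedVectors
    # API for nearest neighbours by cosine similarity.
    if '红包' in wv:
        logger.info(f"words most similar to 红包: {wv.most_similar('红包', topn=5)}")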