get_word2vec_model.py

import pandas as pd
from gensim.models import KeyedVectors
from gensim.models import Word2Vec
from loguru import logger

from config.url_and_db import ai_word_product_engine

logger.remove()  # drop the default handler that loguru creates on import; keeping it would duplicate every log line
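# one log file per day: rotation="00:00" starts a new file at midnight, and the
# {time:YYYY-MM-DD} placeholder stamps each file with the date it was opened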
logger.add("/data/pythonProject/video_to_word/logs/train_word2vec_model.{time:YYYY-MM-DD}.log",
           rotation="00:00",
           format="{time:YYYY-MM-DD HH:mm:ss,SSS} [{process}] [{thread}] {level} {file} {line} - {message}",
           level="INFO")


@logger.catch()
def train_word2vec_model():
    # 1 get corpus
    sql = "select word_split from tb_asr_result where word_split is not null"
    doc_df = pd.read_sql(sql, ai_word_product_engine)
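    # word_split stores space-delimited tokens (presumably produced by the upstream
    # ASR/word-segmentation step), so splitting on a space recovers each token list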
    doc_df['word_split_lst'] = doc_df['word_split'].apply(lambda x: x.split(' '))
    logger.info(f"Fetched {doc_df['word_split_lst'].shape[0]} script files.")
    # 2 train the gensim word2vec model with our own corpus
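    # sg=1 selects the skip-gram architecture; vector_size=50 is the embedding dimensionality,
    # window=3 the context size, min_count=5 the vocabulary frequency cutoff, workers=3 the training threads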
    model = Word2Vec(doc_df['word_split_lst'].values, min_count=5, vector_size=50, workers=3, window=3, sg=1)
    # 3 store just the words and their trained embeddings
    word_vectors = model.wv
    # the scheduled job runs daily, so we simply overwrite the previous model file
    word_vectors.save("/data/pythonProject/video_to_word/models/word2vec.wordvectors")
    logger.info("word2vec model training finished!")


if __name__ == '__main__':
    # 0 train model
    train_word2vec_model()
    # 1 load back with memory-mapping = read-only
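    #   mmap='r' maps the vector array read-only, so concurrent processes share one copy in memory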
    wv = KeyedVectors.load("/data/pythonProject/video_to_word/models/word2vec.wordvectors", mmap='r')
    # 2 get numpy vector of a word (for test)
    logger.info(f"the vector of 红包 is {wv['红包']}")