get_data.py

from concurrent.futures import ThreadPoolExecutor
import requests
import datetime
import json
import pandas as pd
import uuid
import traceback
from sqlalchemy import create_engine
from urllib import parse
from urllib.parse import urlencode

if __name__ == '__main__':
    online_db_con_str = "mysql+pymysql://%s:%s@%s:%d/%s" % ("readonly", parse.quote_plus("hcst@2021"), "139.186.27.96", 3390, "jeecg-boot")
    online_engine = create_engine(online_db_con_str, connect_args={'charset': 'utf8'})
    test_db_con_str = "mysql+pymysql://%s:%s@%s:%d/%s" % ("hcst", parse.quote_plus("hcst@2020"), "139.186.165.84", 3306, "db_ai_word")
    test_engine = create_engine(test_db_con_str, connect_args={'charset': 'utf8'})

    task_submit_url = 'http://139.186.165.84:31013/asr/task/submit'
    task_result_url = 'http://139.186.165.84:31013/asr/task/result'

    # 1. Fetch material info
    # get_material_info_sql = """
    # select t2.signature, t2.url
    # from
    #     (select signature, account_id from ctop_kuaishou_report_daily_material
    #      where account_id in (select account_id from ctop_user_allocation where project_id = 458)
    #      group by signature
    #      having sum(activation) <= 10 and sum(activation) >= 1
    #     ) t1
    # left join
    #     ctop_kuaishou_video_get t2
    # on t1.signature = t2.signature and t1.account_id = t2.account_id
    # where t2.signature is not null
    # """
    # material_info_df = pd.read_sql(get_material_info_sql, online_engine)
    # material_info_df = material_info_df[(~material_info_df.signature.isnull()) & (~material_info_df.url.isnull())]
    # N = 3000 if len(material_info_df) >= 3000 else len(material_info_df)
    # material_info_df = material_info_df.sample(n=N, random_state=2077)

    # material_info_df = pd.read_csv('merge_df_taote.csv')
    # material_info_df = pd.read_csv('merge_df_zhifubao.csv')
    # material_info_df = material_info_df[(~material_info_df.signature.isnull()) & (~material_info_df.url.isnull())]

    # 2. Fetch tasks that have already been submitted
    # submit_task_sql = """select task_id, md5 signature from tb_asr_result"""
    # submit_task_df = pd.read_sql(submit_task_sql, test_engine)

    # 3. Tasks that still need to be submitted
    # to_submit_task_df = material_info_df[~material_info_df.signature.isin(submit_task_df.signature.values)]

    # 4. Submit the tasks
    # for index, row in to_submit_task_df.iterrows():
    #     material_md5 = row['signature']
    #     material_url = row['url']
    #     print(material_md5, material_url)
    #     request_data = {"md5": material_md5, "url": material_url}
    #     # The endpoint takes form-style parameters, not a JSON body, so the request is built differently
    #     request_full_path = task_submit_url + '?' + urlencode(request_data)
    #     response = requests.post(request_full_path)
    #     try:
    #         result = json.loads(response.text)
    #         print(result)
    #     except Exception:
    #         print("error", response.text)

    # 5. Fetch results for tasks whose transcript is still missing
    get_result_sql = """select task_id from tb_asr_result where word_text is null"""
    get_result_df = pd.read_sql(get_result_sql, test_engine)
    for index, row in get_result_df.iterrows():
        task_id = row['task_id']
        request_data = {'task_id': task_id}
        request_full_path = task_result_url + '?' + urlencode(request_data)
        response = requests.post(request_full_path)
        try:
            result = json.loads(response.text)
            print(task_id, result['status'])
        except Exception:
            print("error", task_id, response.text)