import base64
import json
import logging

import numpy as np
import pandas as pd
import requests
from concurrent_log import ConcurrentTimedRotatingFileHandler

from config.url_and_db import get_none_water_mark_url


# Rewrite INSERT INTO as REPLACE INTO: when a duplicate unique key exists, the old
# row is deleted and the new data is inserted in its place.
def mysql_replace_into(table, conn, keys, data_iter):
    from sqlalchemy.ext.compiler import compiles
    from sqlalchemy.sql.expression import Insert

    # Note: this hook is registered globally, so every Insert compiled afterwards
    # in this process is rewritten to REPLACE INTO.
    @compiles(Insert)
    def replace_string(insert, compiler, **kw):
        s = compiler.visit_insert(insert, **kw)
        s = s.replace("INSERT INTO", "REPLACE INTO")
        return s

    data = [dict(zip(keys, row)) for row in data_iter]
    conn.execute(table.table.insert(), data)


# JSON encoder that converts NumPy scalars and arrays to native Python types.
class NpEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return super(NpEncoder, self).default(obj)


# Create a logger whose file rotates at midnight and keeps 100 backups.
def get_logger(log_file_name, log_name):
    log_formatter = logging.Formatter(
        '%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',
        '%Y/%m/%d %I:%M:%S %p')
    log_handler = ConcurrentTimedRotatingFileHandler(
        filename=log_file_name, when="midnight", backupCount=100)
    log_handler.setFormatter(log_formatter)
    logger = logging.getLogger(log_name)
    logger.addHandler(log_handler)
    logger.setLevel(logging.DEBUG)  # log level
    return logger


# Get a watermark-free, time-limited Toutiao video URL.
def get_toutiao_none_water_mark_and_time_efficient_url(vid):
    """
    Given a vid, return the watermark-free, time-limited video URL.
    :return: decoded URL string, or None if the lookup fails
    """
    request_path = get_none_water_mark_url + str(vid)
    response = requests.get(request_path)
    response_data = json.loads(response.text)
    if response_data.get('message') == 'success' and response_data.get('data') \
            and response_data.get('data').get('video_list'):
        video_df = pd.DataFrame()
        for k, v in response_data.get('data').get('video_list').items():
            single_video_df = pd.DataFrame([v])
            video_df = pd.concat([video_df, single_video_df], ignore_index=True)
        video_df.sort_values(by='size', ascending=True, inplace=True)  # last row is the highest-resolution variant
        main_url = video_df['main_url'].values[-1]
        main_url_decode = base64.b64decode(main_url)
        main_url_decode = str(main_url_decode, encoding='utf-8')  # bytes to str
        return main_url_decode
    else:
        return None


if __name__ == '__main__':
    r = get_toutiao_none_water_mark_and_time_efficient_url('v02033290000budu3753giguv9qli6bg')
    print(r)
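

# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): how mysql_replace_into and
# NpEncoder are typically wired up. The engine URL, table name and the sample
# DataFrame below are hypothetical assumptions; the function is only defined,
# never called, so importing the module stays side-effect free.
def _example_replace_into_and_np_encoder():
    from sqlalchemy import create_engine

    # Hypothetical MySQL connection string and table name.
    engine = create_engine('mysql+pymysql://user:password@127.0.0.1:3306/demo?charset=utf8mb4')
    df = pd.DataFrame({'id': [1, 2], 'size': [np.int64(100), np.int64(200)]})

    # mysql_replace_into plugs into DataFrame.to_sql as the `method` callback, so
    # rows colliding on a unique key are overwritten via REPLACE INTO instead of
    # aborting the whole insert.
    df.to_sql('demo_table', con=engine, if_exists='append', index=False,
              method=mysql_replace_into)

    # NpEncoder lets json.dumps handle NumPy scalars/arrays that would otherwise
    # raise "Object of type int64 is not JSON serializable".
    print(json.dumps({'sizes': df['size'].values}, cls=NpEncoder))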