liyuyi@c-top.com.cn 4 years ago
parent
commit
697922141d
3 changed files with 233 additions and 279 deletions
  1. config/config.yaml  +3 −38
  2. full_high_quality_material.py  +230 −0
  3. historical_material.py  +0 −241

+ 3 - 38
config/config.yaml

@@ -25,45 +25,10 @@ localDB:
   port: 3306
   database: mysql
 
-
-bayesDim:
-    age:
-        windowSize: 3
-        isOn: True
-        Lst:
-          - '18-23岁'
-          - '24-30岁'
-          - '31-40岁'
-          - '41-49岁'
-          - '50+岁'
-        table: 'ctop_kuaishou_audience_report_daily_age_material'
-        fieldName: 'age_segment'
-    gender:
-        windowSize: 1
-        isOn: True
-        Lst:
-            - '男'
-            - '女'
-        table: 'ctop_kuaishou_audience_report_daily_gender_material'
-        fieldName: 'gender'
-    city:
-        windowSize: 4
-        isOn: False
-        Lst:
-            - '一线城市'
-            - '新一线城市'
-            - '二线城市'
-            - '三线城市'
-            - '四线城市'
-            - '五线城市'
-        table: 'ctop_kuaishou_audience_report_daily_city_material'
-        fieldName: 'city'
-
-
 # k: the maximum number of neighboring materials to return
-# distanceThreshold: materials at a distance less than or equal to this threshold are judged similar
+# distanceThreshold: materials whose computed distance is less than or equal to this threshold (a value set subjectively by inspecting material content) are judged similar
 nearestNeighbors:
-  k: 30
-  distanceThreshold: 20
+  k: 6
+  distanceThreshold: 5
 
 
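The tightened k and distanceThreshold are consumed by the NearestNeighbors step in full_high_quality_material.py below. A minimal sketch with toy values (hypothetical feature rows, not project data) of how the two settings interact:

import numpy as np
import pandas as pd
from sklearn.neighbors import NearestNeighbors

X = np.array([[0.0, 0.0], [1.0, 1.0], [10.0, 10.0]])  # three toy "materials"
nbrs = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(X)  # k = 2
distances, indices = nbrs.kneighbors(X)  # each point's nearest neighbor is itself (distance 0)
mask = distances > 5  # distanceThreshold = 5
print(pd.DataFrame(indices).mask(mask))  # neighbors beyond the threshold become NaN, as in the script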

+ 230 - 0
full_high_quality_material.py

@@ -0,0 +1,230 @@
+import os
+import yaml
+from common_func import get_db_engine
+import pandas as pd
+from sklearn.preprocessing import StandardScaler
+from sklearn.neighbors import NearestNeighbors
+import numpy as np
+import datetime
+import sys
+import traceback
+import pickle
+import logging
+from concurrent_log import ConcurrentTimedRotatingFileHandler
+
+log_formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%Y/%m/%d %I:%M:%S %p')
+log_handler = ConcurrentTimedRotatingFileHandler("/data/pythonProject/similar_material/logs/historical_material.log", when="midnight",
+                                                 backupCount=100)
+log_handler.setFormatter(log_formatter)
+logger = logging.getLogger('historical_material_logger')
+logger.addHandler(log_handler)
+logger.setLevel(logging.DEBUG)
+logger.info("historical_material_logger started!")
+print('id of historical_material_logger %s' % id(logger))
+
+
+def scaler_feature(data, axis=0):
+    """
+    :param data: pd.DataFrame
+    :param axis: 0 - row-wise standardization, 1 - column-wise standardization
+    :return: z = (x - u) / s
+    """
+    scaler = StandardScaler()
+    scaler_data = None
+    if axis == 0:
+        scaler_data = scaler.fit_transform(data.T).T
+    if axis == 1:
+        scaler_data = scaler.fit_transform(data)
+
+    scaler_df = pd.DataFrame(scaler_data)
+    scaler_df.index = data.index
+    scaler_df.columns = [col + '_scaler' for col in data.columns]
+    return scaler_df
+
+
+def get_segment_features(table, signature, dim_col, project_id, col_prefix):
+    # 1 Fetch the raw data
+    get_org_data_sql = """
+    select signature, aclick, bclick, activation, charge, %s from %s
+    where signature in %s and project_id = %s
+    """ % (dim_col, table, tuple(signature), project_id)
+    org_df = pd.read_sql(get_org_data_sql, read_engine)
+
+    # 2 Compute each segment's action rate, activation rate, and activation cost, and standardize row-wise
+    g = org_df.groupby(['signature', dim_col]).agg({'activation': sum, 'aclick': sum, 'bclick': sum, 'charge': sum}).unstack()
+    # Segment values under this dimension: e.g. gender has 男 (male), 女 (female), 其他 (other); drop '其他' because its business meaning is unclear and it makes the model's results hard to explain
+    segment_cols = list(g['charge'].columns)
+    if '其他' in segment_cols:
+        segment_cols.remove('其他')
+
+    segment_fea_df = pd.DataFrame()
+    for col in segment_cols:
+        segment_fea_df[col + '_bclick_aclick_pct'] = g[('bclick', col)] / g[('aclick', col)]
+        segment_fea_df[col + '_activation_bclick_pct'] = g[('activation', col)] / g[('bclick', col)]
+        segment_fea_df[col + '_cost'] = g[('charge', col)] / g[('activation', col)]
+
+    # 3 Compute each segment's share of cumulative activations, and standardize row-wise
+    for col in segment_cols:
+        segment_fea_df[col + '_activation_pct'] = g[('activation', col)] / (g[[('activation', item) for item in segment_cols]].sum(axis=1))
+
+    # 4 Handle np.nan and np.inf
+    # 4-1 cost
+    # np.inf : numerator > 0 / denominator = 0, i.e. spend but no activations; fill with 999
+    # np.nan : numerator = 0 / denominator = 0, i.e. no spend and no activations; fill with -1
+    cost_cols = [col for col in segment_fea_df.columns if 'cost' in col]
+    segment_fea_df[cost_cols] = segment_fea_df[cost_cols].replace({np.nan: -1, np.inf: 999})
+
+    # 4-2 Ratio features
+    # np.inf : numerator > 0 / denominator = 0, e.g. an action rate with clicks but no impressions, which is a data bug; drop the material and log it
+    # np.nan : numerator = 0 / denominator = 0, e.g. an activation rate with no activations and no clicks; fill with 0
+    pct_cols = [col for col in segment_fea_df.columns if 'pct' in col]
+    segment_fea_df[pct_cols] = segment_fea_df[pct_cols].replace({np.nan: 0})
+
+    inf_df = np.isinf(segment_fea_df[pct_cols]).sum(axis=1)
+    remove_sig_lst = inf_df[inf_df >= 1].index
+    if len(remove_sig_lst):
+        logger.error('Audience report data is abnormal for some materials (e.g. clicks > 0 but impressions = 0); the following are excluded from similar-material computation: %s' % remove_sig_lst)
+        print(remove_sig_lst)
+
+    segment_fea_df = segment_fea_df[~segment_fea_df.index.isin(remove_sig_lst)]
+
+    # 5 Standardize the features row-wise
+    segment_fea_scaler_df = pd.DataFrame()
+    for col in ['bclick_aclick_pct', 'activation_bclick_pct', 'cost', 'activation_pct']:
+        tmp_df = segment_fea_df.filter(like=col)
+        scaler_df = scaler_feature(tmp_df, axis=0)
+        segment_fea_scaler_df = pd.concat([segment_fea_scaler_df, scaler_df], axis=1)
+
+    # 6 col_prefix and index
+    segment_fea_scaler_df.columns = [col_prefix + col for col in segment_fea_scaler_df.columns]
+    segment_fea_scaler_df.index = g.index
+    return segment_fea_scaler_df
+
+
+def get_signature_features(signature, project_id):
+    """
+    Compute the material-level action rate, activation rate, and activation cost
+    :param project_id:
+    :param signature:
+    :return:
+    """
+    get_data_sql = """ select signature, sum(aclick) aclick, sum(bclick) bclick, sum(activation) activation, sum(charge) charge
+    from ctop_kuaishou_audience_report_daily_age_material
+    where signature in %s and project_id = %s
+    group by signature
+    """ % (tuple(signature), project_id)
+    sig_org_df = pd.read_sql(get_data_sql, read_engine)
+
+    sig_fea_df = pd.DataFrame()
+    sig_fea_df['cost'] = sig_org_df['charge'] / sig_org_df['activation']
+    sig_fea_df['bclick_aclick_pct'] = sig_org_df['bclick'] / sig_org_df['aclick']
+    sig_fea_df['activation_bclick_pct'] = sig_org_df['activation'] / sig_org_df['bclick']
+
+    # Standardize the features column-wise
+    sig_fea_df.index = sig_org_df['signature'].values
+    sig_fea_scaler_df = scaler_feature(sig_fea_df, axis=1)
+
+    return sig_fea_scaler_df
+
+
+if __name__ == '__main__':
+    try:
+        # 1. Read the config file
+        with open('config/config.yaml', mode='r', encoding='utf-8') as f:
+            config = yaml.load(f.read(), Loader=yaml.FullLoader)
+
+        # 1-1 Database engines, switched by dev/production environment
+        # The read engine always uses the production database; the write engine switches by environment (test vs production database)
+        if os.getenv('LYY_DEV', 'unknown') == 'dev':
+            write_engine = get_db_engine(config['devDB'])
+        else:
+            write_engine = get_db_engine(config['productDB'])
+
+        read_engine = get_db_engine(config['productDB'])
+
+        # 2. Compute similar materials project by project
+        project_id_lst = config['projectId']
+        for project_id in project_id_lst:
+            # Fetch all historically qualifying materials; tentatively those with at least 100 activations
+            get_sig_sql = """select signature, sum(activation) activation
+                     from ctop_kuaishou_audience_report_daily_age_material
+                     where project_id = %s
+                     group by signature
+                    having sum(activation) >= 100
+                    """ % project_id
+            sig_df = pd.read_sql(get_sig_sql, read_engine)
+            sig_df = sig_df[~sig_df.signature.isnull()]
+            sig_lst = list(sig_df.signature.values)
+
+            if not sig_lst:  # an empty list is falsy; the original `is None` check could never fire
+                logger.info("No qualifying materials; exiting!")
+                sys.exit(0)
+            if len(sig_lst) == 1:
+                logger.info("Only one qualifying material, so no similar materials can be computed; exiting!")
+                sys.exit(1)
+
+            # 2. Build the features
+            # 2-1 Material-level features: action rate, activation rate, activation cost
+            sig_feature_df = get_signature_features(sig_lst, project_id)
+
+            # 2-2 Per-dimension (age, gender), per-segment (male, female, etc.) action rate, activation rate, activation cost and activation share, standardized row-wise
+            # Elements of table_dim_tuple: table name, segment column name, prefix for the constructed feature names
+            table_dim_tuple = [('ctop_kuaishou_audience_report_daily_age_material', 'age_segment', 'age_'),
+                               ('ctop_kuaishou_audience_report_daily_gender_material', 'gender', 'gender_')]
+
+            segment_feature_df = pd.DataFrame()
+            for table, dim_col, col_prefix in table_dim_tuple:
+                tmp_df = get_segment_features(table, sig_lst, dim_col, project_id, col_prefix)
+                segment_feature_df = pd.concat([segment_feature_df, tmp_df], axis=1)
+
+            # 2-3 Merge the material-level features with the per-dimension, per-segment features into the final feature DataFrame
+            final_feature_df = pd.concat([sig_feature_df, segment_feature_df], axis=1)
+            # Some materials have no data in the age or gender dimension, which leaves nulls after the feature concat
+            # Drop those materials so that NearestNeighbors does not raise
+            null_df = final_feature_df.isnull().sum(axis=1)
+            exist_null_fea_sig_lst = null_df[null_df >= 1].index
+
+            # Log materials with missing data in some dimensions
+            if len(exist_null_fea_sig_lst) > 0:
+                logger.info("The following materials have missing data in some dimensions: %s" % exist_null_fea_sig_lst)
+
+            final_feature_df = final_feature_df[~final_feature_df.index.isin(exist_null_fea_sig_lst)]
+
+            # 3 Finding the Nearest Neighbors
+            k = config['nearestNeighbors']['k']
+            distance_threshold = config['nearestNeighbors']['distanceThreshold']
+            X = final_feature_df.values
+            nbrs = NearestNeighbors(n_neighbors=k, algorithm='ball_tree').fit(X)
+            distances, indices = nbrs.kneighbors(X)
+
+            # 3-1 When the distance between two points exceeds the threshold, they are not treated as neighbors
+            # Elements where mask is True are replaced with NaN
+            mask = distances > distance_threshold
+            indices_mask_df = pd.DataFrame(indices).mask(mask)
+            indices_mask_df = indices_mask_df.astype(dtype=pd.Int64Dtype())
+
+            # 3-2 Replace indices with the actual signatures
+            sig_lst = final_feature_df.index.values
+            indices_mask_df.index = sig_lst
+
+            indices_mask_df = indices_mask_df.applymap(lambda x: sig_lst[x] if not pd.isnull(x) else x)
+            res_lst = []
+            for index, row in indices_mask_df.iterrows():
+                others = [item for item in row if (not pd.isnull(item)) and (item != index)]
+                res_lst.append([index] + others)
+
+            res_df = pd.DataFrame(res_lst)
+            res_df.columns = ['signature'] + ['similar_signature_' + str(i + 1) for i in range(res_df.shape[1] - 1)]
+            columns = ['signature'] + ['similar_signature_' + str(i + 1) for i in range(k-1)]
+            res_df = res_df.reindex(columns=columns)
+
+            # 4 Write the results to the database
+            res_df['stat_date'] = datetime.date.today().strftime('%Y-%m-%d')
+            res_df['project_id'] = project_id
+            res_df.to_sql(name="ctop_ai_kuaishou_signature_similar",
+                          con=write_engine,
+                          if_exists='append',
+                          index=False)
+            logger.info("Similar-material computation finished for project_id = %s!" % project_id)
+    except Exception:  # not a bare except: lets SystemExit from the sys.exit calls above propagate
+        logger.error("Exception: %s" % (traceback.format_exc()))
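scaler_feature's two axis branches are easy to confuse; a minimal sketch with toy numbers (hypothetical values, not project data) of what the axis=0 branch (row-wise, used for the per-segment features) and the axis=1 branch (column-wise, used for the material-level features) each compute:

import pandas as pd
from sklearn.preprocessing import StandardScaler

df = pd.DataFrame({'a': [1.0, 4.0], 'b': [3.0, 8.0]}, index=['sig1', 'sig2'])
scaler = StandardScaler()
row_wise = scaler.fit_transform(df.T).T  # axis=0 branch: each row gets mean 0, std 1
col_wise = scaler.fit_transform(df)      # axis=1 branch: each column gets mean 0, std 1
print(row_wise)  # [[-1. 1.] [-1. 1.]]: each material standardized across its own segments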

+ 0 - 241
historical_material.py

@@ -1,241 +0,0 @@
-import os
-import yaml
-from common_func import get_db_engine
-import pandas as pd
-from sklearn.preprocessing import StandardScaler
-from sklearn.neighbors import NearestNeighbors
-import numpy as np
-import sys
-import pickle
-import logging
-from concurrent_log import ConcurrentTimedRotatingFileHandler
-
-log_formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%Y/%m/%d %I:%M:%S %p')
-log_handler = ConcurrentTimedRotatingFileHandler("/data/pythonProject/similar_material/logs/historical_material.log", when="midnight",
-                                                 backupCount=100)
-log_handler.setFormatter(log_formatter)
-logger = logging.getLogger('historical_material_logger')
-logger.addHandler(log_handler)
-logger.setLevel(logging.DEBUG)
-logger.info("historical_material_logger started!")
-print('id of historical_material_logger %s' % id(logger))
-
-
-def scaler_feature(data, axis=0):
-    """
-    :param data: pd.DataFrame
-    :param axis: 0 - row-wise standardization, 1 - column-wise standardization
-    :return: z = (x - u) / s
-    """
-    scaler = StandardScaler()
-    scaler_data = None
-    if axis == 0:
-        scaler_data = scaler.fit_transform(data.T).T
-    if axis == 1:
-        scaler_data = scaler.fit_transform(data)
-
-    scaler_df = pd.DataFrame(scaler_data)
-    scaler_df.index = data.index
-    scaler_df.columns = [col + '_scaler' for col in data.columns]
-    return scaler_df
-
-
-def get_segment_features(table, signature, dim_col, project_id, col_prefix):
-    # 1 Fetch the raw data
-    get_org_data_sql = """
-    select signature, aclick, bclick, activation, charge, %s from %s
-    where signature in %s and project_id = %s
-    """ % (dim_col, table, tuple(signature), project_id)
-    org_df = pd.read_sql(get_org_data_sql, read_engine)
-
-    # 2 Compute each segment's action rate, activation rate, and activation cost, and standardize row-wise
-    g = org_df.groupby(['signature', dim_col]).agg({'activation': sum, 'aclick': sum, 'bclick': sum, 'charge': sum}).unstack()
-    # Segment values under this dimension: e.g. gender has 男 (male), 女 (female), 其他 (other); drop '其他' because its business meaning is unclear and it makes the model's results hard to explain
-    segment_cols = g['charge'].columns
-    if '其他' in segment_cols:
-        segment_cols.remove('其他')
-
-    segment_fea_df = pd.DataFrame()
-    for col in segment_cols:
-        segment_fea_df[col + '_bclick_aclick_pct'] = g[('bclick', col)] / g[('aclick', col)]
-        segment_fea_df[col + '_activation_bclick_pct'] = g[('activation', col)] / g[('bclick', col)]
-        segment_fea_df[col + '_cost'] = g[('charge', col)] / g[('activation', col)]
-
-    # 3 Compute each segment's share of cumulative activations, and standardize row-wise
-    for col in segment_cols:
-        segment_fea_df[col + '_activation_pct'] = g[('activation', col)] / (g[[('activation', item) for item in segment_cols]].sum(axis=1))
-
-    # 4 Handle np.nan and np.inf
-    # 4-1 cost
-    # np.inf : numerator > 0 / denominator = 0, i.e. spend but no activations; fill with 999
-    # np.nan : numerator = 0 / denominator = 0, i.e. no spend and no activations; fill with -1
-    cost_cols = [col for col in segment_fea_df.columns if 'cost' in col]
-    segment_fea_df[cost_cols] = segment_fea_df[cost_cols].replace({np.nan: -1, np.inf: 999})
-
-    # 4-2 Ratio features
-    # np.inf : numerator > 0 / denominator = 0, e.g. an action rate with clicks but no impressions, which is a data bug; drop the material and log it
-    # np.nan : numerator = 0 / denominator = 0, e.g. an activation rate with no activations and no clicks; fill with 0
-    pct_cols = [col for col in segment_fea_df.columns if 'pct' in col]
-    segment_fea_df[pct_cols] = segment_fea_df[pct_cols].replace({np.nan: 0})
-
-    inf_df = np.isinf(segment_fea_df[pct_cols]).sum(axis=1)
-    remove_sig_lst = inf_df[inf_df >= 1].index
-    # TODO add logging for materials with problematic data
-    if len(remove_sig_lst):
-        print(remove_sig_lst)
-
-    segment_fea_df = segment_fea_df[~segment_fea_df.index.isin(remove_sig_lst)]
-
-    # 5 Standardize the features row-wise
-    segment_fea_scaler_df = pd.DataFrame()
-    for col in ['bclick_aclick_pct', 'activation_bclick_pct', 'cost', 'activation_pct']:
-        tmp_df = segment_fea_df.filter(like=col)
-        scaler_df = scaler_feature(tmp_df, axis=0)
-        segment_fea_scaler_df = pd.concat([segment_fea_scaler_df, scaler_df], axis=1)
-
-    # 6 col_prefix and index
-    segment_fea_scaler_df.columns = [col_prefix + col for col in segment_fea_scaler_df.columns]
-    segment_fea_scaler_df.index = g.index
-    return segment_fea_scaler_df
-
-
-def get_signature_features(signature, project_id):
-    """
-    Compute the material-level action rate, activation rate, and activation cost
-    :param project_id:
-    :param signature:
-    :return:
-    """
-    get_data_sql = """ select signature, sum(aclick) aclick, sum(bclick) bclick, sum(activation) activation, sum(charge) charge
-    from ctop_kuaishou_audience_report_daily_age_material
-    where signature in %s and project_id = %s
-    group by signature
-    """ % (tuple(signature), project_id)
-    sig_org_df = pd.read_sql(get_data_sql, read_engine)
-
-    sig_fea_df = pd.DataFrame()
-    sig_fea_df['cost'] = sig_org_df['charge'] / sig_org_df['activation']
-    sig_fea_df['bclick_aclick_pct'] = sig_org_df['bclick'] / sig_org_df['aclick']
-    sig_fea_df['activation_bclick_pct'] = sig_org_df['activation'] / sig_org_df['bclick']
-
-    # Standardize the features column-wise
-    sig_fea_df.index = sig_org_df['signature'].values
-    sig_fea_scaler_df = scaler_feature(sig_fea_df, axis=1)
-
-    return sig_fea_scaler_df
-
-
-if __name__ == '__main__':
-    # 1. Read the config file
-    with open('config/config.yaml', mode='r', encoding='utf-8') as f:
-        config = yaml.load(f.read(), Loader=yaml.FullLoader)
-
-    # 1-1 Database engines, switched by dev/production environment
-    if os.getenv('LYY_DEV', 'unknown') == 'dev':
-        write_engine = get_db_engine(config['devDB'])
-    else:
-        write_engine = get_db_engine(config['productDB'])
-
-    read_engine = get_db_engine(config['productDB'])
-
-    # 2. Fetch all historically qualifying materials
-    project_id = 458
-    get_sig_sql = """select signature, sum(activation) activation
-             from ctop_kuaishou_audience_report_daily_age_material
-             where project_id = %s
-             group by signature
-            having sum(activation) >= 100
-            """ % project_id
-    sig_df = pd.read_sql(get_sig_sql, read_engine)
-    sig_df = sig_df[~sig_df.signature.isnull()]
-    sig_lst = list(sig_df.signature.values)
-    print('sig_lst:', len(sig_lst))
-
-    # TODO for test
-    # sig_lst = ['cefd07cf7951ba9674d43c1d9c112f37',
-    #            '6f11859acaf4f1dd501706f8d50995d8',
-    #            'bc8fe6ae97db794acaf050acff4386b9',
-    #            '59a34e41bfcf321a931a0b204a8609c9',
-    #            'c67604889af726d49401f40283865bb0',
-    #            '75237de4427a9ff6da994976bf516f78',
-    #            '0ab2a8816b1b34638767862123fc74a6',
-    #            '177a0f359eb90623b7ecaf1c8d0b9db4',
-    #            'd3c9bbd98d07afb7be9fe8b1b40133d3']
-
-    if sig_lst is None:
-        # TODO add logger
-        sys.exit(0)
-    if len(sig_lst) == 1:
-        # TODO add logger
-        sys.exit(1)
-
-    # 2. Build the features
-    # 2-1 Material-level features: action rate, activation rate, activation cost
-    sig_feature_df = get_signature_features(sig_lst, project_id)
-
-    # 2-2 Per-dimension (age, gender), per-segment (male, female, etc.) action rate, activation rate, activation cost and activation share, standardized column-wise
-    # get_segment_features(table, signature, dim_col, project_id)
-    table_dim_tuple = [('ctop_kuaishou_audience_report_daily_age_material', 'age_segment', 'age_'),
-                       ('ctop_kuaishou_audience_report_daily_gender_material', 'gender', 'gender_')]
-
-    segment_feature_df = pd.DataFrame()
-    for table, dim_col, col_prefix in table_dim_tuple:
-        tmp_df = get_segment_features(table, sig_lst, dim_col, project_id, col_prefix)
-        segment_feature_df = pd.concat([segment_feature_df, tmp_df], axis=1)
-
-    # 2-3 Merge the material-level features with the per-dimension, per-segment features into the final feature DataFrame
-    final_feature_df = pd.concat([sig_feature_df, segment_feature_df], axis=1)
-    # Some materials have no data in the age or gender dimension, which leaves nulls after the feature concat
-    # Drop those materials so that NearestNeighbors does not raise
-    null_df = final_feature_df.isnull().sum(axis=1)
-    exist_null_fea_sig_lst = null_df[null_df >= 1].index
-
-    # TODO add logging for materials with missing data
-
-    if len(exist_null_fea_sig_lst) > 0:
-        print(exist_null_fea_sig_lst)
-
-    final_feature_df = final_feature_df[~final_feature_df.index.isin(exist_null_fea_sig_lst)]
-
-    with open('final_feature_df.pkl', 'wb') as f:
-        pickle.dump(final_feature_df, f)
-
-    # 3 Finding the Nearest Neighbors
-    k = config['nearestNeighbors']['k']
-    distance_threshold = config['nearestNeighbors']['distanceThreshold']
-    X = final_feature_df.values
-    nbrs = NearestNeighbors(n_neighbors=k, algorithm='ball_tree').fit(X)
-    distances, indices = nbrs.kneighbors(X)
-
-    with open('distances.pkl', 'wb') as f:
-        pickle.dump(distances, f)
-
-    with open('indices.pkl', 'wb') as f:
-        pickle.dump(indices, f)
-
-    # 3-1 When the distance between two points exceeds the threshold, they are not treated as neighbors
-    # Elements where mask is True are replaced with NaN
-    mask = distances > distance_threshold
-    indices_mask_df = pd.DataFrame(indices).mask(mask)
-    indices_mask_df = indices_mask_df.astype(dtype=pd.Int64Dtype())
-
-    # 3-2 Replace indices with the actual signatures
-    sig_lst = final_feature_df.index.values
-    indices_mask_df.index = sig_lst
-
-    indices_mask_df = indices_mask_df.applymap(lambda x: sig_lst[x] if not pd.isnull(x) else x)
-    res_lst = []
-    for index, row in indices_mask_df.iterrows():
-        others = [item for item in row if (not pd.isnull(item)) and (item != index)]
-        res_lst.append([index] + others)
-
-    res_df = pd.DataFrame(res_lst)
-    res_df.columns = ['self'] + ['N' + str(i + 1) for i in range(res_df.shape[1] - 1)]
-    columns = ['self'] + ['N' + str(i + 1) for i in range(k)]
-    res_df = res_df.reindex(columns=columns)
-
-    with open('res_df.pkl', 'wb') as f:
-        pickle.dump(res_df, f)
-
-    # 4 TODO write results to the database
-    pass
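The neighbor-masking step (steps 3-1 and 3-2, shared by the new script and the deleted one) is the part most worth sanity-checking: indices beyond the distance threshold become NaN in a nullable Int64 DataFrame, then surviving indices are mapped back to signatures. A minimal sketch with toy values (hypothetical signatures and distances):

import numpy as np
import pandas as pd

sigs = np.array(['sigA', 'sigB', 'sigC'])
indices = np.array([[0, 1], [1, 0], [2, 0]])                # kneighbors output, k = 2
distances = np.array([[0.0, 2.0], [0.0, 2.0], [0.0, 9.0]])  # sigC's neighbor is far away

masked = pd.DataFrame(indices).mask(distances > 5)  # NaN where the threshold is exceeded
masked = masked.astype(pd.Int64Dtype())             # nullable ints, so NaN survives the cast
masked.index = sigs
named = masked.applymap(lambda x: sigs[x] if not pd.isnull(x) else x)
print(named)  # row sigC keeps only itself; sigA at distance 9 > 5 is dropped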