首页 分享 零基础数据挖掘——金融风控(五)实践树类模型

零基础数据挖掘——金融风控(五)实践树类模型

来源:花匠小妙招 时间:2025-05-09 15:25

1、前言

在重看我的项目的过程中发现自己对于相关知识点理解并不透彻,希望能理论联系实际,加深自己对基础知识的理解。项目来源于阿里天池学习赛——零基础入门金融风控-贷款违约预测,感兴趣的小伙伴可以自己去原文了解。

2、特征工程

①处理时间特征['issueDate']:

['issueDate']转化为与2007-06-01的时间差['issueDateDT'],按天计算。['issueDate']转化为代表贷款月份的['issueDateM'],再转化为one-hot

②['employmentLength']转化为纯数字表示工作年份

③取['earliesCreditLine']最后4个数字作为年份

④自定义编码处理['grade']和['subGrade']

⑤删除部分特征值

['issueDate']已经被转化,可以删除;['policyCode']只有一个取值,没有意义;['id']是行标识,没有意义
 

import numpy as np

import pandas as pd

import datetime

from sklearn.model_selection import train_test_split

from sklearn.linear_model import LogisticRegression

from sklearn.feature_selection import SelectFromModel

from sklearn.preprocessing import MinMaxScaler,StandardScaler

from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_auc_score

# Load the competition train / test-A splits exported from Tianchi.
_data_dir = 'D:/myP/financial_risk/'
data_train = pd.read_csv(_data_dir + 'train.csv')
data_testA = pd.read_csv(_data_dir + 'testA.csv')

# Turn 'issueDate' into a numeric feature: days elapsed since 2007-06-01
# (stored as 'issueDateDT').
# startdate is loop-invariant, so it is computed once, outside the loop.
startdate = datetime.datetime.strptime('2007-06-01', '%Y-%m-%d')
for data in [data_train, data_testA]:
    data['issueDate'] = pd.to_datetime(data['issueDate'], format='%Y-%m-%d')
    # Vectorized datetime subtraction replaces the per-row lambda apply;
    # same values, one C-level pass instead of a Python call per row.
    data['issueDateDT'] = (data['issueDate'] - startdate).dt.days

# Extract the calendar month of the loan as 'issueDateM'.
# 'issueDate' was already converted to datetime64 in the previous loop,
# so the redundant pd.to_datetime round-trip is dropped.
for data in [data_train, data_testA]:
    data['issueDateM'] = data['issueDate'].dt.month

def employmentLength_to_int(s):
    """Parse an employment-length string such as '5 years' into an int8.

    Missing values (NaN/None) are returned unchanged so that pandas keeps
    them as missing after an ``apply``.
    """
    return s if pd.isnull(s) else np.int8(s.split()[0])

# Normalize 'employmentLength' to an integer number of years.
# The two non-numeric labels are mapped onto the 'N years' pattern first,
# then the leading integer is parsed out.  Assigning the result back
# replaces Series.replace(..., inplace=True), which triggers
# chained-assignment warnings on recent pandas versions.
for data in [data_train, data_testA]:
    data['employmentLength'] = (
        data['employmentLength']
        .replace({'10+ years': '10 years', '< 1 year': '0 years'})
        .apply(employmentLength_to_int)
    )

# Keep only the trailing 4-digit year from 'earliesCreditLine' strings
# (e.g. 'Aug-2001' -> 2001).
# The original also computed value_counts(...).sort_index() here and
# discarded the result every iteration; that inspection no-op is removed.
for data in [data_train, data_testA]:
    data['earliesCreditLine'] = data['earliesCreditLine'].apply(lambda s: int(s[-4:]))

# Ordinal-encode the credit grades: 'grade' A..G -> 1..7 and
# 'subGrade' A1..G5 -> 1..35.  The comprehensions generate exactly the
# same mappings the original spelled out by hand.
_grade_map = {g: i + 1 for i, g in enumerate('ABCDEFG')}
_subgrade_map = {'%s%d' % (g, n): i * 5 + n
                 for i, g in enumerate('ABCDEFG')
                 for n in range(1, 6)}
for data in [data_train, data_testA]:
    data['grade'] = data['grade'].map(_grade_map)
    data['subGrade'] = data['subGrade'].map(_subgrade_map)

# Drop columns that carry no signal for a tree model:
#   issueDate  - replaced by issueDateDT / issueDateM above
#   policyCode - constant column (single value), zero variance
#   id         - row identifier, not a feature
delFea = ['policyCode', 'id', 'issueDate']
data_train.drop(columns=delFea, inplace=True)
data_testA.drop(columns=delFea, inplace=True)

# index=False (the original used the equivalent but obscure index=0)
# keeps the row index out of the exported CSVs.
data_train.to_csv('D:/myP/financial_risk/trainfortree.csv', index=False)
data_testA.to_csv('D:/myP/financial_risk/testAfortree.csv', index=False)

3、决策树

4、LightGBM

4.1 确定LGM中决策树的数量n_estimators

import lightgbm as lgb

# Reload the engineered features for the LightGBM experiments.
# NOTE(review): the feature-engineering step above saved
# 'trainfortree.csv', but the original read 'trainforclass.csv', which
# this pipeline never writes; the filename is corrected here — confirm
# against the author's intent.
train = pd.read_csv('D:/myP/financial_risk/trainfortree.csv')

# Hold out 25% of rows as a validation split; 'isDefault' is the label.
x_train, x_vali, y_train, y_vali = train_test_split(
    train.drop('isDefault', axis=1), train['isDefault'], test_size=0.25)

# Fixed LightGBM settings used while searching for the tree count:
# binary objective scored by AUC, moderately deep trees, 80% row and
# feature subsampling, 10 threads.
params = dict(
    boosting_type='gbdt',
    objective='binary',
    metric='auc',
    nthread=10,
    learning_rate=0.1,
    num_leaves=30,
    max_depth=8,
    subsample=0.8,
    colsample_bytree=0.8,
)

# 5-fold CV over up to 1000 boosting rounds; training stops once AUC has
# not improved for 50 rounds, so the length of the recorded AUC history
# is the best n_estimators.
# NOTE(review): the early_stopping_rounds / metrics keyword arguments
# were removed from lgb.cv in LightGBM 4.x (replaced by callbacks and
# params['metric']) — confirm the installed LightGBM version before
# running; the result keys also changed to 'valid auc-mean' in 4.x.
all_train = lgb.Dataset(x_train, y_train)

cv_results = lgb.cv(params, all_train, num_boost_round=1000, nfold=5, stratified=False, shuffle=True, metrics='auc',early_stopping_rounds=50,seed=2022)

print('best n_estimators:', len(cv_results['auc-mean']))

print('best cv score:', pd.Series(cv_results['auc-mean']).max())

best n_estimators: 385 best cv score: 0.7344472095635076

4.2 确定max_depth和num_leaves

# Fix: sklearn.grid_search was removed in scikit-learn 0.20;
# GridSearchCV now lives in sklearn.model_selection (the same module
# train_test_split is already imported from at the top of the file).
from sklearn.model_selection import GridSearchCV

# Jointly tune tree depth and leaf count.
params_test1 = {'max_depth': range(3, 8, 1), 'num_leaves': range(5, 100, 5)}

# NOTE(review): n_estimators=188 disagrees with the 385 found in section
# 4.1 (and used by every later search) — confirm which value was intended.
gsearch1 = GridSearchCV(
    estimator=lgb.LGBMClassifier(boosting_type='gbdt', objective='binary',
                                 metrics='auc', learning_rate=0.1,
                                 n_estimators=188, max_depth=6,
                                 bagging_fraction=0.8, feature_fraction=0.8),
    param_grid=params_test1, scoring='roc_auc', cv=5, n_jobs=-1)

# Fix: the original called gsearch1.fit(X_train, y_train), but the split
# above bound the lowercase name x_train; X_train is undefined.
gsearch1.fit(x_train, y_train)

gsearch1.best_params_, gsearch1.best_score_

({'max_depth': 5, 'num_leaves': 35}, 0.7349047555835462)

4.3 确定'max_bin'和'min_data_in_leaf'

# Tune the histogram bin count and the minimum samples per leaf, holding
# the depth/leaf settings found in section 4.2 fixed.
params_test2 = {'max_bin': range(5, 256, 10),
                'min_data_in_leaf': range(1, 102, 10)}

base_lgb2 = lgb.LGBMClassifier(boosting_type='gbdt', objective='binary',
                               metrics='auc', learning_rate=0.1,
                               n_estimators=385, max_depth=5, num_leaves=35,
                               bagging_fraction=0.8, feature_fraction=0.8)
gsearch2 = GridSearchCV(estimator=base_lgb2, param_grid=params_test2,
                        scoring='roc_auc', cv=5, n_jobs=-1)
gsearch2.fit(x_train, y_train)

gsearch2.best_params_, gsearch2.best_score_

({'max_bin': 205, 'min_data_in_leaf': 101}, 0.7353558693303508)

4.4 确定'feature_fraction'、'bagging_fraction'和'bagging_freq'

# Tune the sampling trio: feature fraction, bagging fraction, and how
# often bagging is re-drawn, with the previously selected tree shape and
# binning parameters locked in.
params_test3 = {'feature_fraction': [0.6, 0.7, 0.8, 0.9, 1.0],
                'bagging_fraction': [0.6, 0.7, 0.8, 0.9, 1.0],
                'bagging_freq': range(0, 81, 10)}

base_lgb3 = lgb.LGBMClassifier(boosting_type='gbdt', objective='binary',
                               metrics='auc', learning_rate=0.1,
                               n_estimators=385, max_depth=5, num_leaves=35,
                               max_bin=205, min_data_in_leaf=101)
gsearch3 = GridSearchCV(estimator=base_lgb3, param_grid=params_test3,
                        scoring='roc_auc', cv=5, n_jobs=-1)
gsearch3.fit(x_train, y_train)

gsearch3.best_params_, gsearch3.best_score_

({'bagging_fraction': 0.6, 'bagging_freq': 0, 'feature_fraction': 0.8}, 0.7353558693303508)

4.5 确定'lambda_l1'和'lambda_l2'

# Tune the L1/L2 regularization strengths with everything found so far
# locked in.  (The candidate lists are kept exactly as the original had
# them, including the 1e-1 / 0.1 duplicate.)
params_test4 = {'lambda_l1': [1e-5, 1e-3, 1e-1, 0.0, 0.1, 0.3,
                              0.5, 0.7, 0.9, 1.0],
                'lambda_l2': [1e-5, 1e-3, 1e-1, 0.0, 0.1, 0.3,
                              0.5, 0.7, 0.9, 1.0]}

base_lgb4 = lgb.LGBMClassifier(boosting_type='gbdt', objective='binary',
                               metrics='auc', learning_rate=0.1,
                               n_estimators=385, max_depth=5, num_leaves=35,
                               max_bin=205, min_data_in_leaf=101,
                               bagging_fraction=0.6, bagging_freq=0,
                               feature_fraction=0.8)
gsearch4 = GridSearchCV(estimator=base_lgb4, param_grid=params_test4,
                        scoring='roc_auc', cv=5, n_jobs=-1)
gsearch4.fit(x_train, y_train)

gsearch4.best_params_, gsearch4.best_score_

({'lambda_l1': 1.0, 'lambda_l2': 0.3}, 0.735480368997539)

4.6 确定'min_split_gain'

# Finally tune the minimum gain required to make a split, with all
# previously selected hyper-parameters fixed.
params_test5 = {'min_split_gain': [0.0, 0.1, 0.2, 0.3, 0.4, 0.5,
                                   0.6, 0.7, 0.8, 0.9, 1.0]}

base_lgb5 = lgb.LGBMClassifier(boosting_type='gbdt', objective='binary',
                               metrics='auc', learning_rate=0.1,
                               n_estimators=385, max_depth=5, num_leaves=35,
                               max_bin=205, min_data_in_leaf=101,
                               bagging_fraction=0.6, bagging_freq=0,
                               feature_fraction=0.8,
                               lambda_l1=1, lambda_l2=0.3)
gsearch5 = GridSearchCV(estimator=base_lgb5, param_grid=params_test5,
                        scoring='roc_auc', cv=5, n_jobs=-1)
gsearch5.fit(x_train, y_train)

gsearch5.best_params_, gsearch5.best_score_

({'min_split_gain': 0.0}, 0.735480368997539)

相关知识

消费金融风控刷新:用户自主,额度“互动”
工商银行:打造基于大数据的智能化风控体系
数据挖掘浅谈
华为云金融科技峰会在阿举办 盘古金融大模型海外首次亮相
华为云发布2021金融系列产品上新计划,迈向金融云原生2.0
加州理工学院公开课:机器学习与数据挖掘
小雨花分期聚焦科技创新,实现风控策略全生命周期管理
赋能金融行业智变,百度智能云发布开元智慧金融方案2.0
信用评分模型python实践——简介
基于大数据的智能风险防控平台设计与实现

网址: 零基础数据挖掘——金融风控(五)实践树类模型 https://www.huajiangbk.com/newsview1907333.html

所属分类:花卉
上一篇: 2022年央行泉州世界遗产金银纪
下一篇: 开花的树木3D模型 XfrogP

推荐分享