Introduction
(1) First, tune the n-gram size and the number of features for the TF-IDF feature extraction; the best combination is ngram=(1,2) with features=500.
(2) Tune the LogisticRegression, XGBClassifier and LGBMClassifier models individually. I only adjusted a few of XGB's parameters; tuning and comparing every parameter would be far too much work, so this post only gives tuning examples to illustrate the general approach to model tuning.
(3) Open-source code: https://github.com/823316627bandeng/TIANCHI-2021-AI-Compition
Implementation
(1) Import packages
import os
import numpy as np
import pandas as pd
from sklearn.decomposition import NMF, TruncatedSVD, PCA
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import StandardScaler
from xgboost import XGBClassifier
from utils import *
from lightgbm import LGBMClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import roc_auc_score
# Load the labels and the preprocessed training text
label = np.array(pd.read_csv('./data/label.csv'))
train = pd.read_csv('./temp/train.csv', header=None, names=['id', 'text', 'label'])

adjust_model()  # defined in step (2) below
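Note that `from utils import *` brings in the custom helper `Mutilogloss` used in step (3); its body is not shown in this post. A minimal sketch of what such a multi-label log loss could look like (my assumption, not the repository's actual implementation):

# Hypothetical sketch of the Mutilogloss helper from utils.py:
# average binary cross-entropy over all labels, with probability clipping.
def Mutilogloss(y_true, y_pred, eps=1e-15):
    y_pred = np.clip(np.asarray(y_pred), eps, 1 - eps)
    y_true = np.asarray(y_true)
    loss = -(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))
    return loss.mean()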
(2) Model tuning
def adjust_model():
    Tdf = TfidfVectorizer(ngram_range=(1, 2), max_features=500)
    tdf_data = Tdf.fit_transform(train['text'])
    X_train, X_test, y_train, y_test = train_test_split(tdf_data, label, test_size=0.3)
    # Parameter grid for the grid search whose results are listed below
    param_test1 = {'estimator__max_depth': range(2, 8, 2)}
    # Tuned XGB configuration, kept for reference:
    '''
    model = OneVsRestClassifier(XGBClassifier(eval_metric='mlogloss',
                                              max_depth=11,
                                              min_child_weight=1,
                                              use_label_encoder=False,
                                              learning_rate=0.01,
                                              n_estimators=150,
                                              gamma=0,
                                              subsample=0.8,
                                              colsample_bytree=0.8,
                                              nthread=100,
                                              scale_pos_weight=1,
                                              seed=27,
                                              verbose=True))
    '''
    # Tuned LGBM configuration, kept for reference:
    '''
    model = OneVsRestClassifier(LGBMClassifier(is_unbalance=True,
                                               metric='binary_logloss,auc',
                                               # max_depth=6,
                                               num_leaves=40,
                                               learning_rate=0.1,
                                               feature_fraction=0.7,
                                               min_child_samples=21,
                                               min_child_weight=0.001,
                                               bagging_fraction=1,
                                               bagging_freq=2,
                                               reg_alpha=0.001,
                                               reg_lambda=8,
                                               cat_smooth=0,
                                               # num_iterations=200
                                               ))
    '''
    # model = OneVsRestClassifier(LGBMClassifier())
    model = OneVsRestClassifier(XGBClassifier(eval_metric='mlogloss', use_label_encoder=False, n_estimators=150))
    model.fit(X_train, y_train)
    predict = model.predict_proba(X_test)
    score = roc_auc_score(y_test, predict)
    print(score)
XGB grid-search results:
{'estimator__max_depth': 9, 'estimator__min_child_weight': 1}
{'estimator__max_depth': 11}  0.9812110365828264
{'estimator__n_estimators': 150}  0.9834881407453535
After tuning: 0.9726861215062805

LGB grid-search results:
{'estimator__max_depth': 6}  best score: 0.9811430144134826
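The dict-style results above are the best_params_ output of a scikit-learn grid search; the search call itself is not shown in the post. A minimal sketch of how param_test1 would be used inside adjust_model() (cv=3 and scoring='roc_auc' are assumed settings, not taken from the original; a multi-label target may need a custom scorer):

from sklearn.model_selection import GridSearchCV

# Hypothetical reconstruction: wrap the OvR XGB model in a grid search
# over param_test1 and report the best parameters and score.
gsearch = GridSearchCV(
    estimator=OneVsRestClassifier(
        XGBClassifier(eval_metric='mlogloss', use_label_encoder=False, n_estimators=150)),
    param_grid=param_test1,
    scoring='roc_auc',  # assumed metric
    cv=3)               # assumed fold count
gsearch.fit(X_train, y_train)
print(gsearch.best_params_, gsearch.best_score_)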
(3) Tuning the TF-IDF feature extraction
def adjust_idtdf():
    # Smaller ranges tried in an earlier run:
    # list_ngram = [1, 2, 3, 4]
    # list_feature = [100, 200, 300, 400]
    list_ngram = [1, 2, 3, 4, 5]
    list_feature = [100, 200, 300, 400, 500]
    # Dict for recording the scores
    score_dict = {"list_n": [], "list_f": [], "loss": []}

    # Helper that evaluates every (n-gram, max_features) combination
    def para_Tdf(data_x):
        for n in list_ngram:
            for fea in list_feature:
                Tdf = TfidfVectorizer(ngram_range=(1, n), max_features=fea)
                tdf_data = Tdf.fit_transform(data_x)
                # tdf_data = tdf_data.toarray()
                X_train, X_test, y_train, y_test = train_test_split(tdf_data, label, test_size=0.3)
                model = OneVsRestClassifier(XGBClassifier(eval_metric='mlogloss', use_label_encoder=False, n_estimators=50))
                model.fit(X_train, y_train)
                predict = model.predict_proba(X_test)
                # Mutilogloss comes from utils (see the sketch after step (1))
                loss = Mutilogloss(y_test, predict)
                score_dict["list_n"].append(n)
                score_dict['list_f'].append(fea)
                score_dict['loss'].append(loss)
                print("n={0},feature={1},loss={2}".format(n, fea, loss))

    # Run the search
    para_Tdf(train['text'])
    # Show the scores as a DataFrame
    print(pd.DataFrame(score_dict))
The best result is n=2, features=500. Full search log:
n=1,feature=100,loss=0.09694388171340544
n=1,feature=200,loss=0.07941648607131963
n=1,feature=300,loss=0.0780516995282797
n=1,feature=400,loss=0.07654529189186797
n=1,feature=500,loss=0.07875673493941672
n=2,feature=100,loss=0.10700796997032506
n=2,feature=200,loss=0.0872626769884241
n=2,feature=300,loss=0.08134605319231948
n=2,feature=400,loss=0.07927331816025636
n=2,feature=500,loss=0.07391725763363112
n=3,feature=100,loss=0.10642417486808319
n=3,feature=200,loss=0.0932806660865527
n=3,feature=300,loss=0.0821267581008504
n=3,feature=400,loss=0.08258777666414407
n=3,feature=500,loss=0.07525704598697901
n=4,feature=100,loss=0.10395870861632356
n=4,feature=200,loss=0.09252871191998951
n=4,feature=300,loss=0.08208295772650118
n=4,feature=400,loss=0.08249975725295985
n=4,feature=500,loss=0.07920155662551372
n=5,feature=100,loss=0.10649166642764825
n=5,feature=200,loss=0.09238465463657325
n=5,feature=300,loss=0.08104836900458223
n=5,feature=400,loss=0.07833574743241475
n=5,feature=500,loss=0.07796380784547806
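For completeness, a minimal sketch that puts the tuned values together: the best TF-IDF settings from step (3) feeding the OvR XGB model from step (2). The split ratio and parameters simply mirror the code above; treat it as illustrative rather than the final competition pipeline.

# End-to-end sketch with the tuned settings
Tdf = TfidfVectorizer(ngram_range=(1, 2), max_features=500)  # best from step (3)
tdf_data = Tdf.fit_transform(train['text'])
X_train, X_test, y_train, y_test = train_test_split(tdf_data, label, test_size=0.3)

model = OneVsRestClassifier(
    XGBClassifier(eval_metric='mlogloss', use_label_encoder=False,
                  n_estimators=150))  # best n_estimators from the XGB search
model.fit(X_train, y_train)
print(roc_auc_score(y_test, model.predict_proba(X_test)))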