引言
采用机器学习分类算法 XGBClassifier、LGBMClassifier、LogisticRegression 进行集成学习,线上得到 0.83+ 的准确率。
开源源码:https://github.com/823316627bandeng/TIANCHI-2021-AI-Compition
模型实现
(1)导入包
import os
import pickle

import numpy as np
import pandas as pd
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold, MultilabelStratifiedShuffleSplit
from lightgbm import LGBMClassifier
from mlxtend.classifier import EnsembleVoteClassifier, StackingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multioutput import ClassifierChain, ClassifierMixin, MultiOutputClassifier
from sklearn.preprocessing import StandardScaler
from xgboost import XGBClassifier

from utils import *

os.environ["CUDA_VISIBLE_DEVICES"] = "0"
(2)准备数据
# Load the multi-label target matrix and the pre-extracted feature tables.
label = np.array(pd.read_csv('./data/label.csv'))
# train_sampel = pd.read_csv('./data/train_sample_500.csv')
train_sampel = pd.read_csv('./data/train_sample.csv')
test_sampel = pd.read_csv('./data/test_sample.csv')
# test_sampel = pd.read_csv('./data/test_sample_500.csv')

# Standardize features.
# BUG FIX: the scaler must be fit on the training data only and then applied
# to the test data with transform(); the original called fit_transform() on
# the test set too, scaling train and test with different statistics.
stdScalar = StandardScaler()
train_df = stdScalar.fit_transform(np.float_(train_sampel))
test_df = stdScalar.transform(np.float_(test_sampel))
losslist = []  # per-fold validation log-loss values
nfold = 5
# Stratified K-fold that preserves the multi-label distribution in each fold.
kf = MultilabelStratifiedKFold(n_splits=nfold, shuffle=True, random_state=2020)
# Out-of-fold probability predictions, same shape as the label matrix.
lr_oof = np.zeros(label.shape)
# Stores the test-set probabilities (accumulated as a fold average).
probility = np.zeros((len(test_df), label.shape[1]))
i = 0  # fold counter
model_type = 'ensemble'
# model_type ='single'
# K-fold cross-validated training
# K-fold cross-validated training: fit one model per fold, keep the
# out-of-fold probabilities to estimate the validation loss, and average
# the test-set probabilities across folds.
for fold, (train_index, valid_index) in enumerate(kf.split(train_df, label), start=1):
    print("\nFold {}".format(fold))
    X_train, label_train = train_df[train_index], label[train_index]
    X_valid, label_valid = train_df[valid_index], label[valid_index]

    # Three base classifiers.
    clf1 = OneVsRestClassifier(XGBClassifier(eval_metric='mlogloss', use_label_encoder=False, n_estimators=150))
    clf2 = LGBMClassifier()
    clf3 = LogisticRegression(max_iter=500, n_jobs=20)

    if model_type == 'ensemble':
        # Ensembling strategy 1: soft voting, with XGBoost weighted double.
        model = OneVsRestClassifier(
            EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], weights=[2, 1, 1], voting='soft', verbose=2))
    elif model_type == 'stacking':
        # Ensembling strategy 2: stacking with a logistic-regression meta-learner.
        lr = LogisticRegression()
        base = StackingClassifier(classifiers=[clf1, clf2, clf3], use_probas=True,
                                  average_probas=False, meta_classifier=lr, verbose=2)
        model = OneVsRestClassifier(base)
    else:
        # Single-model baseline.
        model = OneVsRestClassifier(XGBClassifier(eval_metric='mlogloss', use_label_encoder=False, n_estimators=150))

    model.fit(X_train, label_train)

    # Out-of-fold predictions for this fold's validation split.
    lr_oof[valid_index] = model.predict_proba(X_valid)
    # Multi-label log loss over all but the last label column.
    # NOTE(review): Mutilogloss comes from utils — assumed to be the competition metric.
    loss = Mutilogloss(label_valid[:, :-1], lr_oof[valid_index][:, :-1])
    losslist.append(loss)

    # Accumulate the fold-averaged test-set probabilities.
    probility += model.predict_proba(test_df) / nfold

print(losslist)
print(np.mean(losslist))
print()
# Write the submission file: one row per test sample, formatted as
# "<index>|,|<p1 p2 ... >" where the LAST probability column is dropped
# (consistent with the loss computation above).
submit_dir = 'submits/'
if not os.path.exists(submit_dir):
    os.makedirs(submit_dir)

# Build all rows first and join once — avoids quadratic string concatenation.
rows = []
for idx in range(len(probility)):
    probs = " ".join(str(x) for x in list(probility[idx])[0:-1])
    rows.append(str(idx) + '|' + ',' + '|' + probs)

with open(submit_dir + 'machine_model_submit.csv', 'w') as f:
    f.write('\n'.join(rows))
print()