Analyzing Age
plt.figure(figsize=(18,4))
train_age['Age'] = train_age['Age'].astype(int)   # cast Age to integer (the np.int alias is removed in newer numpy)
average_age = train_age[['Age','Survived']].groupby('Age', as_index=False).mean()
sns.barplot(x='Age', y='Survived', data=average_age, palette='BuPu')
This gives a rough picture of the relationship between Age and Survived.
Analyzing SibSp and Parch
# Select passengers with and without siblings/spouses aboard
sibsp_df = train[train['SibSp'] != 0]      # has siblings/spouses
no_sibsp_df = train[train['SibSp'] == 0]   # no siblings/spouses

# Select passengers with and without parents/children aboard
parch_df = train[train['Parch'] != 0]      # has parents/children
no_parch_df = train[train['Parch'] == 0]   # no parents/children

plt.figure(figsize=(12,3))
plt.subplot(141)
plt.axis('equal')
sibsp_df['Survived'].value_counts().plot.pie(labels=['No Survived','Survived'], autopct='%1.1f%%', colormap='Blues')
plt.subplot(142)
plt.axis('equal')
no_sibsp_df['Survived'].value_counts().plot.pie(labels=['No Survived','Survived'], autopct='%1.1f%%', colormap='Blues')
plt.subplot(143)
plt.axis('equal')
parch_df['Survived'].value_counts().plot.pie(labels=['No Survived','Survived'], autopct='%1.1f%%', colormap='Reds')
plt.subplot(144)
plt.axis('equal')
no_parch_df['Survived'].value_counts().plot.pie(labels=['No Survived','Survived'], autopct='%1.1f%%', colormap='Reds')

# Does the number of relatives aboard relate to survival?
fig, ax = plt.subplots(1, 2, figsize=(15,4))
train[['Parch','Survived']].groupby('Parch').mean().plot.bar(ax=ax[0])
train[['SibSp','Survived']].groupby('SibSp').mean().plot.bar(ax=ax[1])

# Combine SibSp and Parch into a single family-size feature
train['family_size'] = train['Parch'] + train['SibSp'] + 1
train[['family_size','Survived']].groupby('family_size').mean().plot.bar(figsize=(15,4))
These plots show fairly clearly how having siblings/spouses and parents/children aboard relates to survival. Since I regard both as family members, I then added them together into a family_size feature and plotted its survival rate as a bar chart, which presents the data more clearly.
Analyzing Pclass
train[['Pclass','Survived']].groupby('Pclass').mean().plot.bar()
# Look at the relationship between Survived and Pclass
Survived_Pclass = train['Pclass'].groupby(train['Survived'])
print(Survived_Pclass.value_counts().unstack())
Survived_Pclass.value_counts().unstack().plot(kind='bar', stacked=True)
plt.show()
The analysis shows that the higher the cabin class, the higher the survival rate, so we can infer that Pclass is related to the outcome.
Analyzing Fare
fig, ax = plt.subplots(1, 2, figsize=(15,4))
train['Fare'].hist(bins=70, ax=ax[0])
train.boxplot(column='Fare', by='Pclass', showfliers=False, ax=ax[1])

# Split fares by survival outcome
fare_not_survived = train['Fare'][train['Survived'] == 0]
fare_survived = train['Fare'][train['Survived'] == 1]

average_fare = pd.DataFrame([fare_not_survived.mean(), fare_survived.mean()])
std_fare = pd.DataFrame([fare_not_survived.std(), fare_survived.std()])
average_fare.plot(yerr=std_fare, kind='bar', figsize=(15,4), grid=True)
Next we look at how Fare relates to Pclass and to Survived. Fare is clearly tied to Pclass: fares in Pclass 1 are higher than in the other classes, and Fare is related to survival as well.
Analyzing Embarked
sns.barplot(x='Embarked', y='Survived', data=train, color="teal")
plt.show()
We can see that passengers who embarked at port C had a higher survival rate.
Combined analysis
fig, ax = plt.subplots(1, 2, figsize=(18,8))
sns.violinplot(x='Pclass', y='Age', hue='Survived', data=train_age, split=True, ax=ax[0])
ax[0].set_title('Pclass and Age vs Survived')
sns.violinplot(x='Sex', y='Age', hue='Survived', data=train_age, split=True, ax=ax[1])
ax[1].set_title('Sex and Age vs Survived')
The goal here is to compare how survival varies with Pclass and Age on one hand, and with Sex and Age on the other.
fig = plt.figure()
fig.set(alpha=0.2)

plt.subplot2grid((2,3), (0,0))
train.Survived.value_counts().plot(kind='bar')
plt.title('Survived')
plt.ylabel('num')

plt.subplot2grid((2,3), (0,1))
train.Pclass.value_counts().plot(kind='bar')
plt.title('Pclass')
plt.ylabel('num')

plt.subplot2grid((2,3), (0,2))
plt.scatter(train.Survived, train.Age)
plt.ylabel('Age')
plt.grid(True, which='major', axis='y')
plt.title('Age')

plt.subplot2grid((2,3), (1,0), colspan=2)
train.Age[train.Pclass == 1].plot(kind='kde')
train.Age[train.Pclass == 2].plot(kind='kde')
train.Age[train.Pclass == 3].plot(kind='kde')
plt.xlabel('Age')
plt.ylabel('Density')
plt.title('Distribution of passenger ages by Pclass')
plt.legend(('first','second','third'), loc='best')

plt.subplot2grid((2,3), (1,2))
train.Embarked.value_counts().plot(kind='bar')
plt.title('Embarked')
plt.ylabel('num')

plt.show()
This figure lays the individual relationships out side by side and gives a more intuitive overall view.
Summarizing the observations above, the final Survived outcome is most likely closely related to Sex: during the rescue, women were probably given priority.
It should also be closely related to Fare and Pclass, which are important features: wealthy passengers and first-class passengers had higher survival rates, while passengers with cheap tickets survived far less often.
Looking at Embarked, we can see that passengers who boarded at port C had a higher survival rate, possibly because that entrance was closer to the exits, so they could be rescued faster.
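To put a number on the Sex hypothesis, here is a minimal sketch (not in the original notebook) that prints and plots the survival rate by sex from the same train DataFrame:
# Quick check of the Sex hypothesis (assumes train still holds the raw 'male'/'female' strings)
print(train[['Sex', 'Survived']].groupby('Sex').mean())
sns.barplot(x='Sex', y='Survived', data=train)
plt.show()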
3. Building the Models
Logistic regression requires all input features to be numeric, so some preprocessing is needed before we can build the model.
To make the modeling easier, I first concatenate train and test.
dataset = train.append(test, sort=False)  # merged data, so train and test can be cleaned together
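A side note: DataFrame.append was removed in pandas 2.0, so if you are running a newer pandas (an assumption about your environment), the equivalent merge can be written with pd.concat:
# Equivalent to train.append(test, sort=False) on pandas >= 2.0
dataset = pd.concat([train, test], sort=False)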
Encoding the data
Encoding Sex
Encode the Sex column: male → 1, female → 0.
sexdict = {'male': 1, 'female': 0}
dataset.Sex = dataset.Sex.map(sexdict)
One-hot encoding Embarked and Pclass (Cabin was already dropped during cleaning)
embarked2 = pd.get_dummies(dataset.Embarked, prefix='Embarked')
dataset = pd.concat([dataset, embarked2], axis=1)  # append the encoded columns to the original data
dataset.drop(['Embarked'], axis=1, inplace=True)   # then drop the original Embarked column
dataset.head(1)
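The complete code at the end of this post one-hot encodes Pclass in the same way (the heatmap below refers to a Pclass_1 column, so this step belongs here too):
pclass = pd.get_dummies(dataset.Pclass, prefix='Pclass')
dataset = pd.concat([dataset, pclass], axis=1)
dataset.drop(['Pclass'], axis=1, inplace=True)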
Creating the family-size feature
SibSp is the number of siblings/spouses aboard and Parch the number of parents/children aboard. From these two numbers we can compute how many people a passenger travelled with, and use that as a new feature.
dataset['family'] = dataset.SibSp + dataset.Parch + 1
dataset.head(1)
Dropping irrelevant columns
Finally, drop the columns that (in my view) are not helpful.
dataset.drop(['Ticket'], axis=1, inplace=True)
dataset.info()
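The complete code at the end also drops the Name column at this step; the raw name string is not used by the heatmap or the models below:
dataset.drop(['Name'], axis=1, inplace=True)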
Next I use seaborn to draw a heatmap of the correlations between the features.
plt.figure(figsize=(14,12))
sns.heatmap(dataset.corr(), annot=True)
plt.show()
The heatmap shows that survival correlates fairly strongly with Sex, Fare, and Pclass_1, so these dimensions are used for prediction.
Training and test sets
x_train = dataset.iloc[0:891, :]           # the first 891 rows are the original training set
y_train = x_train.Survived
x_train.drop(['Survived'], axis=1, inplace=True)

x_test = dataset.iloc[891:, :]             # the remaining rows are the test set
x_test.drop(['Survived'], axis=1, inplace=True)

y_test = pd.read_csv('../data_files/1.Titantic_data/gender_submission.csv')  # reference answers for the test set
y_test = np.squeeze(y_test)
x_train.shape, y_train.shape, x_test.shape, y_test.shape
Logistic Regression model
First we build a Logistic Regression model, importing LogisticRegression from sklearn, and use it to make predictions.
We first fit the model on the training set. Here the last 100 rows of x_train are held out as a validation set: the model is fit on the remaining rows, then used to predict the held-out 100 rows, and its accuracy is computed.
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

model = LogisticRegression()
# fit on all but the last 100 rows of the training set
model.fit(x_train.iloc[0:-100, :], y_train.iloc[0:-100])
# evaluate on the 100 held-out rows
accuracy_score(model.predict(x_train.iloc[-100:, :]), y_train.iloc[-100:].values.reshape(-1, 1))
# 0.82
The resulting accuracy is about 0.82.
We then use this model to predict the test set and look at the results.
prediction1 = model.predict(x_test)

result = pd.DataFrame({'PassengerId': y_test['PassengerId'].values, 'Survived': prediction1.astype(np.int32)})
result.to_csv("../results/predictions1.csv", index=False)
result.head()
This produces a predictions1.csv file. Submitting it to Kaggle gives a score of about 0.76315. That is fine for now: this is only our baseline model, and we will keep improving it.
When training the previous model, 100 rows were held back from training, so now I train on all of the training data, use the new model to predict the test set, and submit to Kaggle again.
model2 = LogisticRegression()
model2.fit(x_train, y_train)

prediction2 = model2.predict(x_test)
accuracy_score(y_test['Survived'], prediction2)

result = pd.DataFrame({'PassengerId': y_test['PassengerId'].values, 'Survived': prediction2.astype(np.int32)})
result.to_csv("../results/predictions2.csv", index=False)
result.head()
The result is a bit better than before, though not by much; the Kaggle score rises to about 0.76794. This suggests that the amount of training data also plays an important role in building the model.
Then we use cross-validation.
First, a quick look at the cross-validation scores.
from sklearn.model_selection import cross_val_score, train_test_split

cross_val_score(model2, x_train, y_train, cv=5, scoring='accuracy')
# array([0.81005587, 0.79775281, 0.78651685, 0.76966292, 0.8258427 ])
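To condense the five fold scores into one number, a small addition (not in the original notebook) is to take their mean and standard deviation:
scores = cross_val_score(model2, x_train, y_train, cv=5, scoring='accuracy')
print(scores.mean(), scores.std())  # mean accuracy is roughly 0.80 for the folds above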
As we can see, the scores are quite decent.
Next we also list the features with the largest absolute coefficients, i.e. the variables the model relies on most.
pd.concat((pd.DataFrame(x_train.columns, columns=['variable']),
           pd.DataFrame(abs(model2.coef_[0]), columns=['importance'])),
          axis=1).sort_values(by='importance', ascending=False)[:15]
Random Forest model
# Random forest
from sklearn.ensemble import RandomForestClassifier

model3 = RandomForestClassifier(n_estimators=500, criterion='entropy', max_depth=5,
                                min_samples_split=1.0, min_samples_leaf=1, max_features='auto',
                                bootstrap=False, oob_score=False, n_jobs=1, random_state=0, verbose=0)
model3.fit(x_train, y_train)
prediction3 = model3.predict(x_test)

result = pd.DataFrame({'PassengerId': y_test['PassengerId'].values, 'Survived': prediction3.astype(np.int32)})
result.to_csv("../results/predictions3.csv", index=False)
accuracy_score(y_test['Survived'], prediction3)
The random forest result is somewhat disappointing: it is not as good as plain LogisticRegression, so further tuning is needed.
Decision Tree model
from sklearn.tree import DecisionTreeClassifier

model4 = DecisionTreeClassifier(criterion='entropy', max_depth=7, min_impurity_decrease=0.0)
model4.fit(x_train, y_train)
prediction4 = model4.predict(x_test)

result = pd.DataFrame({'PassengerId': y_test['PassengerId'].values, 'Survived': prediction4.astype(np.int32)})
result.to_csv("../results/predictions4.csv", index=False)
accuracy_score(y_test['Survived'], prediction4)
Voting
prediciton_vote = pd.DataFrame({'PassengerId': y_test['PassengerId'],
                                'Vote': prediction1.astype(int) + prediction2.astype(int) + prediction3.astype(int)})
vote = {0: False, 1: False, 2: True, 3: True}   # predict survival when at least 2 of the 3 models agree
prediciton_vote['Survived'] = prediciton_vote['Vote'].map(vote)
prediciton_vote.head()

result = pd.DataFrame({'PassengerId': y_test['PassengerId'].values, 'Survived': prediciton_vote.Survived.astype(np.int32)})
result.to_csv("../results/predictions5.csv", index=False)
This final result is the best so far, with a Kaggle score of about 0.77272.
In this round of modeling the best I could reach was only about 0.77, probably because the feature engineering is still not good enough. But this is only a first attempt; I will keep at it and keep looking for ways to improve the features.
Do one thing at a time, and do well.
4. Complete Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")

train = pd.read_csv('../data_files/1.Titantic_data/train.csv')
test = pd.read_csv('../data_files/1.Titantic_data/test.csv')

train['Age'] = train['Age'].fillna(train['Age'].mean())  # fill missing ages with the mean
train.drop(['Cabin'], axis=1, inplace=True)              # drop the Cabin column
train.Embarked = train.Embarked.fillna('S')              # fill missing Embarked values with 'S'

test['Age'] = test['Age'].fillna(test['Age'].mean())
test['Fare'] = test['Fare'].fillna(test['Fare'].mean())
test.drop('Cabin', axis=1, inplace=True)

dataset = train.append(test, sort=False)  # merged data, so train and test can be cleaned together

sexdict = {'male': 1, 'female': 0}
dataset.Sex = dataset.Sex.map(sexdict)

embarked2 = pd.get_dummies(dataset.Embarked, prefix='Embarked')
dataset = pd.concat([dataset, embarked2], axis=1)  # append the encoded columns to the original data
dataset.drop(['Embarked'], axis=1, inplace=True)   # then drop the original Embarked column

pclass = pd.get_dummies(dataset.Pclass, prefix='Pclass')
dataset = pd.concat([dataset, pclass], axis=1)
dataset.drop(['Pclass'], axis=1, inplace=True)

dataset['family'] = dataset.SibSp + dataset.Parch + 1
dataset.drop(['Ticket', 'Name'], axis=1, inplace=True)

x_train = dataset.iloc[0:891, :]
y_train = x_train.Survived
x_train.drop(['Survived'], axis=1, inplace=True)

x_test = dataset.iloc[891:, :]
x_test.drop(['Survived'], axis=1, inplace=True)

y_test = pd.read_csv('../data_files/1.Titantic_data/gender_submission.csv')  # reference answers for the test set
y_test = np.squeeze(y_test)
x_train.shape, y_train.shape, x_test.shape, y_test.shape

from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

model = LogisticRegression()
model.fit(x_train.iloc[0:-100, :], y_train.iloc[0:-100])
accuracy_score(model.predict(x_train.iloc[-100:, :]), y_train.iloc[-100:].values.reshape(-1, 1))
prediction1 = model.predict(x_test)
result = pd.DataFrame({'PassengerId': y_test['PassengerId'].values, 'Survived': prediction1.astype(np.int32)})
result.to_csv("../results/predictions1.csv", index=False)

model2 = LogisticRegression()
model2.fit(x_train, y_train)
prediction2 = model2.predict(x_test)
result = pd.DataFrame({'PassengerId': y_test['PassengerId'].values, 'Survived': prediction2.astype(np.int32)})
result.to_csv("../results/predictions2.csv", index=False)

# Random forest
from sklearn.ensemble import RandomForestClassifier
model3 = RandomForestClassifier(n_estimators=500, criterion='entropy', max_depth=5,
                                min_samples_split=1.0, min_samples_leaf=1, max_features='auto',
                                bootstrap=False, oob_score=False, n_jobs=1, random_state=0, verbose=0)
model3.fit(x_train, y_train)
prediction3 = model3.predict(x_test)
result = pd.DataFrame({'PassengerId': y_test['PassengerId'].values, 'Survived': prediction3.astype(np.int32)})
result.to_csv("../results/predictions3.csv", index=False)

from sklearn.tree import DecisionTreeClassifier
model4 = DecisionTreeClassifier(criterion='entropy', max_depth=7, min_impurity_decrease=0.0)
model4.fit(x_train, y_train)
prediction4 = model4.predict(x_test)
result = pd.DataFrame({'PassengerId': y_test['PassengerId'].values, 'Survived': prediction4.astype(np.int32)})
result.to_csv("../results/predictions4.csv", index=False)

prediciton_vote = pd.DataFrame({'PassengerId': y_test['PassengerId'],
                                'Vote': prediction1.astype(int) + prediction2.astype(int) + prediction3.astype(int)})
vote = {0: False, 1: False, 2: True, 3: True}
prediciton_vote['Survived'] = prediciton_vote['Vote'].map(vote)
prediciton_vote.head()
result = pd.DataFrame({'PassengerId': y_test['PassengerId'].values, 'Survived': prediciton_vote.Survived.astype(np.int32)})
result.to_csv("../results/predictions5.csv", index=False)
Further optimization
This section will be updated from time to time with further optimizations.
Quote of the day
Don't let the past steal your present.
If you need the data and code, you can get them yourself:
Option 1: my gitee
Option 2: Baidu Netdisk
Link: https://pan.baidu.com/s/1U9dteXf56yo3fQ7b9LETsA
Extraction code: 5odf