"""Text classification pipeline: bag-of-words features + logistic regression.

Reads the competition train/test CSVs, vectorizes the pre-segmented text in
the `word_seg` column, trains a logistic-regression classifier, and writes
the predictions to ./result.csv.
"""
import pandas as pd

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression

# NOTE(review): the original script also had `import imp`, which was never
# used; `imp` is deprecated and removed in Python 3.12 (importing it crashes
# there), so it is dropped.

# Load the data. The raw `article` column is never used, so drop it (and the
# redundant `id` on the train set) to reduce memory pressure.
df_test = pd.read_csv('testset.csv')
df_train = pd.read_csv('trainset.csv')
df_train.drop(columns=['article', 'id'], inplace=True)
df_test.drop(columns=['article'], inplace=True)

# Bag-of-words features over `word_seg`: unigrams + bigrams, terms appearing
# in fewer than 3 docs or more than 90% of docs pruned, capped at 10k dims.
# The vocabulary is fit on the training set only.
vectorizer = CountVectorizer(ngram_range=(1, 2), min_df=3, max_df=0.9,
                             max_features=10000)
vectorizer.fit(df_train['word_seg'])
x_train = vectorizer.transform(df_train['word_seg'])
x_test = vectorizer.transform(df_test['word_seg'])

# Labels in the data are 1-based; shift to 0-based for training.
y_train = df_train['class'] - 1

# Bug fix: dual=True is only supported by the liblinear solver. Since
# scikit-learn 0.22 the default solver is lbfgs, which raises
# ValueError for dual=True — pin the solver explicitly.
lg = LogisticRegression(C=4, dual=True, solver='liblinear')
lg.fit(x_train, y_train)

y_test = lg.predict(x_test)

# Shift predictions back to the original 1-based label space and save the
# (id, class) pairs as the submission file.
df_test['class'] = y_test.tolist()
df_test['class'] = df_test['class'] + 1
df_result = df_test.loc[:, ['id', 'class']]
df_result.to_csv('./result.csv', index=False)

print("完成")
运行结果是:
pandas.errors.ParserError: Error tokenizing data. C error: out of memory
这是因为我的电脑内存太小,导致了内存溢出。除了换一台内存更大的电脑,也可以在 pd.read_csv 中显式指定 dtype、用 usecols 只读取需要的列,或用 chunksize 分块读取来降低内存占用。换机器后可以得到最终得分为72分的答案。笔者最终排名位于全国前300名,算是一个个人感觉还不错的成绩了。