附件:仿照实现sklearn中的kNN分类器,调用方法完全一样。
编辑器不限:PyCharm、Sublime Text、记事本等任意工具都可以用来编写这些文件。
kNN.py:
import numpy as np
from math import sqrt
from collections import Counter


class KNNClassifier:
    """k-nearest-neighbors classifier with a scikit-learn-style interface."""

    def __init__(self, k):
        """Initialize the classifier; k is the number of neighbors (k >= 1)."""
        assert k >= 1, "k must be valid"
        self.k = k
        self._X_train = None  # training feature matrix, set by fit()
        self._y_train = None  # training label vector, set by fit()

    def fit(self, X_train, y_train):
        """Store the training set; kNN does all its work at predict time.

        Returns self so calls can be chained, sklearn-style.
        """
        assert X_train.shape[0] == y_train.shape[0], \
            "the size of X_train must equal to the size of y_train"
        assert self.k <= X_train.shape[0], \
            "the size of X_train must be at least k"
        self._X_train = X_train
        self._y_train = y_train
        return self

    def predict(self, X_predict):
        """Predict a label for every row of X_predict; returns a 1-d array."""
        assert self._X_train is not None and self._y_train is not None, \
            "must fit before predict"
        assert X_predict.shape[1] == self._X_train.shape[1], \
            "the feature number of X_predict must be equal to X_train"
        y_predict = [self._predict(x) for x in X_predict]
        return np.array(y_predict)

    def _predict(self, x):
        """Predict the label of a single sample x by majority vote among the
        k nearest training samples (Euclidean distance)."""
        assert x.shape[0] == self._X_train.shape[1], \
            "the feature number of x must be equal to X_train"
        # Vectorized distance computation replaces the original per-row
        # Python loop: broadcasting subtracts x from every training row
        # at C speed; values and therefore the argsort order are identical.
        distances = np.sqrt(np.sum((self._X_train - x) ** 2, axis=1))
        nearest = np.argsort(distances)
        topK_y = [self._y_train[i] for i in nearest[:self.k]]
        votes = Counter(topK_y)
        return votes.most_common(1)[0][0]

    def score(self, X_test, y_test):
        """Return the accuracy of the classifier on the given test set."""
        # Imported lazily so the classifier itself (fit/predict) can be used
        # even when the package-relative metrics module is unavailable.
        from .metrics import accuracy_score
        y_predict = self.predict(X_test)
        return accuracy_score(y_test, y_predict)

    def __repr__(self):
        return "KNN(k=%d)" % self.k
metrics.py:
import numpy as np


def accuracy_score(y_true, y_predict):
    """Compute the fraction of predictions that exactly match the true labels.

    Parameters: y_true and y_predict are 1-d arrays of equal length.
    Returns a float in [0, 1].
    """
    # The two arrays must align one-to-one before element-wise comparison.
    assert y_true.shape[0] == y_predict.shape[0], \
        "the size of y_true must be equal to the size of y_predict"
    # np.sum runs at C speed; the builtin sum iterated the bool array
    # element-by-element in Python. The returned value is identical.
    return np.sum(y_predict == y_true) / len(y_true)
model_selection.py:
import numpy as np


def train_test_split(X, y, test_ratio=0.2, seed=None):
    """Split X and y into random train and test subsets.

    Parameters
    ----------
    X : 2-d array of samples; y : 1-d array of labels, same length as X.
    test_ratio : fraction of samples placed in the test set, in [0.0, 1.0].
    seed : optional int for reproducible shuffling.

    Returns X_train, X_test, y_train, y_test.
    """
    # Every sample must have exactly one label.
    assert X.shape[0] == y.shape[0], \
        "the size of X must be equal to the size of y"
    assert 0.0 <= test_ratio <= 1.0, \
        "test_ratio must be valid"

    # BUG FIX: the original `if seed:` treated seed=0 as falsy and silently
    # skipped seeding, so seed=0 gave a non-reproducible split.
    if seed is not None:
        np.random.seed(seed)

    # Random permutation of all row indices.
    shuffle_indexes = np.random.permutation(len(X))
    test_size = int(len(X) * test_ratio)
    test_indexes = shuffle_indexes[:test_size]   # rows for the test split
    train_indexes = shuffle_indexes[test_size:]  # rows for the train split

    X_train = X[train_indexes]
    y_train = y[train_indexes]
    X_test = X[test_indexes]
    y_test = y[test_indexes]

    return X_train, X_test, y_train, y_test
preprocessing.py:
import numpy as np


class StandardScaler:
    """Standardize features to zero mean and unit variance, column-wise,
    mimicking sklearn.preprocessing.StandardScaler."""

    def __init__(self):
        self.mean_ = None   # per-feature mean, set by fit()
        self.scale_ = None  # per-feature standard deviation, set by fit()

    def fit(self, X):
        """Learn the per-column mean and standard deviation of X.

        X must be a 2-d array. Returns self for chaining.
        """
        assert X.ndim == 2, "The dimension of X must be 2"
        # Vectorized reductions over axis 0 replace the original per-column
        # Python loops; the resulting arrays hold identical values.
        self.mean_ = X.mean(axis=0)
        self.scale_ = X.std(axis=0)
        return self

    def transform(self, X):
        """Return a standardized copy of X: (X - mean_) / scale_ per column."""
        assert X.ndim == 2, "The dimension of X must be 2"
        assert self.mean_ is not None and self.scale_ is not None, \
            "must fit before transform!"
        assert X.shape[1] == len(self.mean_), \
            "the feature number of X must be equal to mean_ and std_"
        # NOTE(review): a constant training column gives scale_ == 0 and
        # divides by zero here, exactly as the original loop did -- confirm
        # callers feed features with nonzero variance.
        return (X - self.mean_) / self.scale_