kaggle上边这个签到的项目有同学做过没有,为啥用KNN全量跑只有10%多一点的准确率呀[流泪]
Facebook V: Predicting Check Ins
全量约 3000 万条跑出来差不多就这个水平,反而只抽两万条跑还有 30% 的准确率,下面是代码:
import pandas as pd
import numpy as np
import datetime
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
import gc #做内存释放
# Load the Facebook V check-in training data and engineer time features.
data = pd.read_csv(r'E:\kaggle dataset\train.csv')

# Decode the raw `time` column into calendar features.
# NOTE(review): in this competition `time` is an anonymised offset, not a real
# epoch timestamp — unit="s" is an assumption to confirm (many public solutions
# treat it as minutes). The derived day/weekday/hour still work as periodic
# features either way, since only the cycle matters, not the absolute date.
time_value = pd.to_datetime(data["time"], unit="s")
date = pd.DatetimeIndex(time_value)
data["day"] = date.day
data["weekday"] = date.weekday
data["hour"] = date.hour

# Drop extremely rare classes: keep only place_ids with more than 3 check-ins,
# otherwise KNN has almost no same-class neighbours to vote with.
place_count = data.groupby("place_id").count()["row_id"]
data_final = data[data["place_id"].isin(place_count[place_count > 3].index.values)]

# Feature matrix and target.
# NOTE(review): running KNN over the *entire* 10km x 10km map mixes tens of
# thousands of candidate place_ids in every neighbourhood, which is why the
# full-data accuracy collapses to ~10% while a small subsample scores higher —
# the standard approach is to bin the map into small x/y grid cells and fit
# one model per cell. Verify against the competition's top write-ups.
x = data_final[["x", "y", "accuracy", "day", "weekday", "hour"]]
y = data_final["place_id"]

x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=22)

# Standardise using statistics fitted on the training split only,
# so no test-set information leaks into the scaler.
transfer = StandardScaler()
x_train = transfer.fit_transform(x_train)
x_test = transfer.transform(x_test)
# Grid-search the neighbour count k for KNN, then score on the held-out split.
a = datetime.datetime.now()

# No need to preset n_neighbors: GridSearchCV tries every candidate below.
estimator = KNeighborsClassifier()
param_dict = {"n_neighbors": [1, 2, 3, 4, 5, 7, 8, 9, 10]}
# FIX: the original comment claimed 10-fold while cv=2 was passed. cv=2 keeps
# the search affordable at this data size; raise it (e.g. cv=5 or cv=10) when
# experimenting on a small subsample.
estimator = GridSearchCV(estimator, param_grid=param_dict, cv=2)
estimator.fit(x_train, y_train)

b = datetime.datetime.now()
print(a.strftime('%Y-%m-%d %H:%M:%S'), '\n', b.strftime('%Y-%m-%d %H:%M:%S'), '\n')

a = datetime.datetime.now()
# BUG FIX: `%time` is an IPython line magic and a SyntaxError in a plain .py
# script; removed — the surrounding datetime stamps already time this call.
score = estimator.score(x_test, y_test)
print("准确率:", score)
b = datetime.datetime.now()
print(a.strftime('%Y-%m-%d %H:%M:%S'), '\n', b.strftime('%Y-%m-%d %H:%M:%S'), '\n')