forked from justmarkham/DAT5
-
Notifications
You must be signed in to change notification settings - Fork 1
/
19_gridsearchcv_exercise.py
63 lines (52 loc) · 2.13 KB
/
19_gridsearchcv_exercise.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
'''
EXERCISE: GridSearchCV with Stack Overflow competition data
'''
import pandas as pd
# feature engineering: turn a raw Stack Overflow CSV into a model-ready frame
def make_features(filename):
    """Read *filename* and add engineered feature columns.

    Renames the answer-count column to 'Answers' and derives:
    TitleLength / BodyLength (character counts) and NumTags
    (count of non-null Tag1..Tag5 columns).
    """
    data = pd.read_csv(filename, index_col=0)
    data = data.rename(columns={'OwnerUndeletedAnswerCountAtPostTime': 'Answers'})
    data['TitleLength'] = data['Title'].map(len)
    data['BodyLength'] = data['BodyMarkdown'].map(len)
    # count how many of the five tag slots are filled in per question
    tag_block = data.loc[:, 'Tag1':'Tag5']
    data['NumTags'] = tag_block.notnull().sum(axis=1)
    return data
# apply function to both training and testing files
# NOTE: expects 'train.csv' and 'test.csv' in the working directory
train = make_features('train.csv')
test = make_features('test.csv')
# define X and y
# feature matrix uses the raw reputation column plus the three engineered
# features created by make_features (TitleLength, BodyLength, NumTags)
feature_cols = ['ReputationAtPostCreation', 'Answers', 'TitleLength', 'BodyLength', 'NumTags']
X = train[feature_cols]
y = train.OpenStatus
'''
MAIN TASK: Use GridSearchCV to find optimal parameters for KNeighborsClassifier.
- For "n_neighbors", try 5 different integer values.
- For "weights", try 'uniform' and 'distance'.
- Use 5-fold cross-validation (instead of 10-fold) to save computational time.
- Remember that log loss is your evaluation metric!
BONUS TASK #1: Once you have found optimal parameters, train your KNN model using
those parameters, make predictions on the test set, and submit those predictions.
BONUS TASK #2: Read the scikit-learn documentation for GridSearchCV to find the
shortcut for accomplishing bonus task #1.
'''
# MAIN TASK: grid-search KNN hyperparameters with 5-fold CV and log loss
from sklearn.neighbors import KNeighborsClassifier
# sklearn.grid_search was deprecated in 0.18 and removed in 0.20;
# GridSearchCV now lives in sklearn.model_selection
from sklearn.model_selection import GridSearchCV

knn = KNeighborsClassifier()
# 5 candidate neighborhood sizes and both weighting schemes (5 x 2 = 10 fits per fold)
neighbors_range = [20, 40, 60, 80, 100]
weight_options = ['uniform', 'distance']
param_grid = dict(n_neighbors=neighbors_range, weights=weight_options)
# the 'log_loss' scoring string was renamed 'neg_log_loss' (scores are
# negated so that higher is always better for GridSearchCV)
grid = GridSearchCV(knn, param_grid, cv=5, scoring='neg_log_loss')
grid.fit(X, y)
# grid_scores_ was removed along with sklearn.grid_search; cv_results_
# holds the per-combination scores. Print results so the script shows them.
print(grid.cv_results_)
print(grid.best_score_)
print(grid.best_params_)
# BONUS TASK #1: refit KNN with the best parameters found above and
# write class-1 probabilities for the test set to a submission file
knn = KNeighborsClassifier(n_neighbors=100, weights='uniform')
knn.fit(X, y)
open_probs = knn.predict_proba(test[feature_cols])[:, 1]
sub = pd.DataFrame({'OpenStatus': open_probs}, index=test.index)
sub.index.name = 'id'
sub.to_csv('sub.csv')
# BONUS TASK #2: GridSearchCV refits the best estimator on the full
# training data by default (refit=True), so the fitted grid object can
# predict directly — no need to retrain a separate model
y_prob = grid.predict_proba(test[feature_cols])[:, 1]
submission = pd.DataFrame({'OpenStatus': y_prob}, index=test.index)
submission.index.name = 'id'
submission.to_csv('sub.csv')