# import libraries
import numpy
import pandas

from keras.layers import Dense
from keras.models import Sequential
from keras.wrappers.scikit_learn import KerasClassifier

# NOTE(review): sklearn.cross_validation was deprecated in scikit-learn 0.18 and
# removed in 0.20 — on 0.18+ the same names live in sklearn.model_selection.
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
12
+
13
+ seed = 7
14
+ numpy .random .seed (seed )
15
+
16
+ #load data
17
+ dataframe = pandas .read_csv ("sonar.csv" , header = None )
18
+ dataset = dataframe .values
19
+ # split into input (X) and output (Y)
20
+ X = dataset [:,0 :60 ].astype (float )
21
+ Y = dataset [:,60 ]
22
+
23
+ # one hot encoding
24
+ encoder = LabelEncoder ()
25
+ encoder .fit (Y )
26
+ encoded_Y = encoder .transform (Y )
27
+
28
+ #baseline model
29
+ def create_baseline ():
30
+ #create model
31
+ #we start with same number of neurons as input in hidden layer as a starting point
32
+ model = Sequential ()
33
+ model .add (Dense (60 , input_dim = 60 , init = 'normal' , activation = 'relu' ))
34
+ model .add (Dense (1 , init = 'normal' , activation = 'sigmoid' ))
35
+ #compile model
36
+ model .compile (loss = 'binary_crossentropy' , optimizer = 'adam' , metrics = ['accuracy' ])
37
+ return model
38
+
39
+ #evaluating the baseline model with standardized dataset
40
+ estimators = []
41
+ estimators .append (('standardize' , StandardScaler ()))
42
+ estimators .append (('mlp' , KerasClassifier (build_fn = create_baseline , nb_epoch = 100 , batch_size = 5 , verbose = 0 )))
43
+ pipeline = Pipeline (estimators )
44
+ kfold = StratifiedKFold (y = encoded_Y , n_folds = 10 , shuffle = True , random_state = seed )
45
+ results = cross_val_score (pipeline , X , encoded_Y , cv = kfold )
46
+ print ("Standardized: %.2f%% (%.2f%%)" % (results .mean ()* 100 , results .std ()* 100 ))
0 commit comments