Skip to content

Commit e026727

Browse files
authored
Uploaded 7 py files
1 parent 0afc998 commit e026727

7 files changed

+300
-0
lines changed

Diff for: L1 Tensorflow Basic Functions.py

+22
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
# Side-by-side demo of the symbolic "define the graph, then run it" workflow
# in Theano and TensorFlow 1.x (tf.placeholder / tf.Session are TF1-only APIs).
import theano
from theano import tensor
import tensorflow as tf

# --- Theano: declare symbolic double-precision scalars ---
a = tensor.dscalar()
b = tensor.dscalar()

c = a + b

# theano.function compiles the symbolic expression into a callable.
f = theano.function([a, b], c)

print("theano: ", f(55, 10))

###

# --- TensorFlow: placeholders receive concrete values at run time ---
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
add = tf.add(a, b)
# Run the graph inside a context manager so the session's resources are
# always released, even if run() raises — the original leaked the session.
with tf.Session() as sess:
    binding = {a: 1.5, b: 2.5}
    c = sess.run(add, feed_dict=binding)
print(c)

Diff for: L2 Pima Indian Diabetes.py

+36
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
# Binary classifier for the Pima Indians diabetes dataset:
# 8 inputs -> 12 relu -> 8 relu -> 1 sigmoid output.
import numpy
from keras.models import Sequential
from keras.layers import Dense

# Seed the RNG so weight initialisation (and therefore results) repeat.
seed = 7
numpy.random.seed(seed)

# Load the CSV: columns 0-7 are the predictors, column 8 the 0/1 label.
dataset = numpy.loadtxt("pima-indians-diabetes.csv", delimiter=",")
X, Y = dataset[:, 0:8], dataset[:, 8]

# Build the network by handing Sequential the whole layer stack at once.
model = Sequential([
    Dense(12, input_dim=8, init='uniform', activation='relu'),
    Dense(8, init='uniform', activation='relu'),
    Dense(1, init='uniform', activation='sigmoid'),
])

# Log-loss objective, Adam optimiser, report classification accuracy.
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# Train for 150 epochs, 10 rows per weight update, holding out a third of
# the data for validation ('nb_epoch' is the Keras 1.x spelling of 'epochs').
model.fit(X, Y, validation_split=0.33, nb_epoch=150, batch_size=10)

# Re-score on the full dataset and print the accuracy metric.
# NOTE(review): this evaluates on data the model trained on, so the
# reported accuracy is optimistic.
scores = model.evaluate(X, Y)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))

Diff for: L3 PID w StratifiedKFold.py

+50
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,50 @@
1+
## using StratifiedKFold for validation

#load libraries
from keras.models import Sequential
from keras.layers import Dense
from sklearn.cross_validation import StratifiedKFold
import numpy

# Fix the RNG seed so folds and weight initialisation repeat across runs.
seed = 7
numpy.random.seed(seed)

# Columns 0-7 hold the predictors, column 8 the diabetes label.
dataset = numpy.loadtxt("pima-indians-diabetes.csv", delimiter=",")
X, Y = dataset[:, 0:8], dataset[:, 8]


def _build_model():
    """Return a freshly initialised, compiled 8 -> 12 -> 8 -> 1 classifier."""
    net = Sequential()
    net.add(Dense(12, input_dim=8, init='uniform', activation='relu'))
    net.add(Dense(8, init='uniform', activation='relu'))
    net.add(Dense(1, init='uniform', activation='sigmoid'))
    net.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return net


# Stratified 10-fold split preserves the class balance within every fold.
kfold = StratifiedKFold(y=Y, n_folds=10, shuffle=True, random_state=seed)
# Per-fold accuracy scores, collected to summarise at the end.
cvscores = []

# One fresh model per fold; verbose=0 suppresses the per-epoch output.
for train, test in kfold:
    model = _build_model()
    model.fit(X[train], Y[train], nb_epoch=150, batch_size=10, verbose=0)
    # Score on the held-out fold only.
    scores = model.evaluate(X[test], Y[test], verbose=0)
    print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
    cvscores.append(scores[1] * 100)

# Cross-validation accuracy as mean +/- standard deviation.
print("%.2f%% (+/- %.2f%%)" % (numpy.mean(cvscores), numpy.std(cvscores)))

Diff for: L4 PID w Keras Wrappers.py

+42
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
## using KerasClassifier to wrap deep learning models

#import libraries
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.cross_validation import StratifiedKFold
from sklearn.cross_validation import cross_val_score
import numpy
import pandas


def create_model():
    """Return a compiled 8 -> 12 -> 8 -> 1 binary classifier.

    KerasClassifier requires a zero-argument builder like this and calls
    it itself whenever a fresh model is needed.
    """
    net = Sequential([
        Dense(12, input_dim=8, init='uniform', activation='relu'),
        Dense(8, init='uniform', activation='relu'),
        Dense(1, init='uniform', activation='sigmoid'),
    ])
    net.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return net


# Reproducible folds and weight initialisation.
seed = 7
numpy.random.seed(seed)

# Predictors in columns 0-7, diabetes label in column 8.
dataset = numpy.loadtxt("pima-indians-diabetes.csv", delimiter=",")
X, Y = dataset[:, 0:8], dataset[:, 8]

# Wrap the builder so scikit-learn utilities can drive training/prediction.
model = KerasClassifier(build_fn=create_model, nb_epoch=150, batch_size=10)

# Stratified 10-fold cross validation; print the mean accuracy.
kfold = StratifiedKFold(y=Y, n_folds=10, shuffle=True, random_state=seed)
results = cross_val_score(model, X, Y, cv=kfold)
print(results.mean())
41+
42+

Diff for: L5 PID w Grid Search.py

+55
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,55 @@
1+
## Testing different configurations of optimizer, init, epoch and batchsize
## using GridSearchCV()

# load libraries
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.grid_search import GridSearchCV
import numpy
import pandas


def create_model(optimizer='rmsprop', init='glorot_uniform'):
    """Return a compiled 8 -> 12 -> 8 -> 1 classifier.

    The defaults let GridSearchCV instantiate the model even before it
    substitutes its own optimizer/init candidates.
    """
    net = Sequential([
        Dense(12, input_dim=8, init=init, activation='relu'),
        Dense(8, init=init, activation='relu'),
        Dense(1, init=init, activation='sigmoid'),
    ])
    net.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return net


# Reproducible weight initialisation.
numpy.random.seed(7)

# Predictors in columns 0-7, diabetes label in column 8.
dataset = numpy.loadtxt("pima-indians-diabetes.csv", delimiter=",")
X, Y = dataset[:, 0:8], dataset[:, 8]

# Wrap the builder so scikit-learn can treat the network as an estimator.
model = KerasClassifier(build_fn=create_model)

# Candidate values for every hyper-parameter the grid search will try.
param_grid = {
    'optimizer': ['rmsprop', 'adam'],
    'init': ['glorot_uniform', 'normal', 'uniform'],
    'nb_epoch': numpy.array([50, 100, 150]),
    'batch_size': numpy.array([5, 10, 20]),
}

# Exhaustively fit every combination and keep the best scorer.
grid = GridSearchCV(estimator=model, param_grid=param_grid)
grid_result = grid.fit(X, Y)

# Best configuration first, then the score of every combination tried.
print("best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
for params, mean_score, scores in grid_result.grid_scores_:
    print("%f (%f) with: %r" % (scores.mean(), scores.std(), params))

Diff for: L6 Iris Multiclass Classification.py

+52
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
1+
# import libraries
import numpy
import pandas
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline

# Reproducible shuffling and weight initialisation.
seed = 7
numpy.random.seed(seed)

# pandas (not numpy) loads the CSV because the class column holds strings.
# Column 0 is skipped (presumably a row id — confirm against the file);
# columns 1-4 are the measurements, column 5 the species label.
dataframe = pandas.read_csv("Iris.csv", header=0)
dataset = dataframe.values
X = dataset[:, 1:5].astype(float)
Y = dataset[:, 5]

# String class names -> integer codes -> one-hot rows
# (one output neuron per class; fit_transform does fit + transform in one call).
encoder = LabelEncoder()
encoded_Y = encoder.fit_transform(Y)
dummy_y = np_utils.to_categorical(encoded_Y)


def baseline_model():
    """Return a compiled 4 -> 4 -> 3 network for the three iris classes."""
    # sigmoid keeps each of the 3 outputs in the range 0..1
    net = Sequential([
        Dense(4, input_dim=4, init='normal', activation='relu'),
        Dense(3, init='normal', activation='sigmoid'),
    ])
    # logarithmic loss (called categorical_crossentropy in keras) + ADAM
    net.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return net


# 10-fold cross validation over the wrapped estimator (verbose=0: quiet epochs).
estimator = KerasClassifier(build_fn=baseline_model, nb_epoch=100, batch_size=5, verbose=0)
kfold = KFold(n=len(X), n_folds=10, shuffle=True, random_state=seed)

results = cross_val_score(estimator, X, dummy_y, cv=kfold)
print("Accuracy: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))

Diff for: L7 Sonar Object Classification.py

+43
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
#import libraries
import numpy
import pandas
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.preprocessing import LabelEncoder
from sklearn.cross_validation import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline

# Reproducible folds and weight initialisation.
seed = 7
numpy.random.seed(seed)

# Sonar data: 60 float features per row; the last column is the class label
# (presumably strings, since LabelEncoder maps it to integers below).
dataframe = pandas.read_csv("sonar.csv", header=None)
dataset = dataframe.values
X = dataset[:, 0:60].astype(float)
Y = dataset[:, 60]

# Map the raw labels onto integer codes (fit + transform in one call).
encoder = LabelEncoder()
encoded_Y = encoder.fit_transform(Y)


def create_baseline():
    """Return a compiled 60 -> 60 -> 1 baseline classifier.

    The hidden layer mirrors the input width as a simple starting point.
    """
    net = Sequential([
        Dense(60, input_dim=60, init='normal', activation='relu'),
        Dense(1, init='normal', activation='sigmoid'),
    ])
    net.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return net


# Stratified 10-fold CV over the wrapped model; verbose=0 silences epoch logs.
estimator = KerasClassifier(build_fn=create_baseline, nb_epoch=100, batch_size=5, verbose=0)
kfold = StratifiedKFold(y=encoded_Y, n_folds=10, shuffle=True, random_state=seed)
results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
print("Results: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
43+

0 commit comments

Comments
 (0)