I want to implement a Neural Network for the MNIST dataset, and I found a lot of examples on the net. But I want to study this problem with a different approach: I want to create 10 NNs (one per class), where each one classifies a single class against all the others (for example, the first NN distinguishes only the "1" class from the rest). It's just an exercise (I'm a Python newbie and I want to learn).
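For example, for the network dedicated to the "1" class I imagine the labels would become binary (1 when the sample is that digit, 0 otherwise); here is a minimal sketch of what I mean (the array of labels is just made up for the example):

import numpy
y = numpy.array([5, 0, 4, 1, 9, 2, 1])   # made-up integer labels, as in MNIST
digit = 1
# 1 where the sample is the target digit, 0 for every other digit
y_binary = (y == digit).astype('float32')
print(y_binary)   # [0. 0. 0. 1. 0. 0. 1.]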
This is my current Python code; do you have any suggestions on how to modify it in order to obtain the class separation I explained above?
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
import numpy
from keras.utils import np_utils
numpy.random.seed(7)  # fix the random seed for reproducibility
(X_train, y_train), (X_test, y_test) = mnist.load_data()
num_pixels = X_train.shape[1] * X_train.shape[2]  # 28 * 28 = 784
X_train = X_train.reshape(X_train.shape[0], num_pixels).astype('float32')
X_test = X_test.reshape(X_test.shape[0], num_pixels).astype('float32')
X_train = X_train / 255
X_test = X_test / 255
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
print(y_test)
num_classes = y_test.shape[1]
def baseline_model():
    # one hidden layer with 784 units, softmax output over the 10 digit classes
    model = Sequential()
    model.add(Dense(num_pixels, input_dim=num_pixels, kernel_initializer='normal', activation='relu'))
    model.add(Dense(num_classes, kernel_initializer='normal', activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
model = baseline_model()
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3, batch_size=200, verbose=2)
model.summary()
scores = model.evaluate(X_test, y_test, verbose=1)
print("Baseline Error: %.2f%%" % (100-scores[1]*100))