iPython code below:
- print('Neural network model')
- from tensorflow import keras
- keras.__version__  # the Keras version bundled with TensorFlow
- !nvcc --version  # shell command: report the installed CUDA compiler version
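`nvcc --version` only reports the CUDA compiler; to confirm that TensorFlow itself can see a GPU, a quick check (my addition, not part of the original session) is:
- import tensorflow as tf
- tf.config.list_physical_devices('GPU')  # an empty list means training will run on the CPU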
- from tensorflow.keras.datasets import mnist
- mnist?  # IPython help on the mnist dataset module
- import matplotlib.pyplot as plt
- import numpy as np
- (X_train, y_train), (X_test, y_test) = mnist.load_data()  # 60,000 training and 10,000 test images, 28x28 grayscale
- X_train.shape  # (60000, 28, 28)
- y_train.shape  # (60000,)
- np.set_printoptions(linewidth=np.inf)  # keep each 28-pixel row on a single printed line
- X_train[7]  # raw pixel values (0-255) of the eighth training image
- y_train[0:10]  # the first ten labels
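Another quick look (my addition, standard NumPy) at how the ten classes are distributed:
- np.unique(y_train, return_counts=True)  # roughly 6,000 examples per digit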
- plt.figure(figsize=(5, 5))
- for i in range(9):
-     plt.subplot(3, 3, i+1)
-     plt.imshow(X_train[i])
- plt.savefig("nums.jpg")  # write the 3x3 grid to disk before showing it
- plt.show()
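A small variant of the plotting loop (my addition, not in the original post) renders the digits in grayscale and titles each panel with its label:
- plt.figure(figsize=(5, 5))
- for i in range(9):
-     plt.subplot(3, 3, i+1)
-     plt.imshow(X_train[i], cmap='gray')  # grayscale instead of the default colormap
-     plt.title(str(y_train[i]))           # the integer label, e.g. 5
-     plt.axis('off')
- plt.show()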
- X_train = X_train.reshape(60000, 784).astype('float32')  # flatten each 28x28 image to a 784-dim vector
- X_test = X_test.reshape(10000, 784).astype('float32')
- # normalize pixel values from the 0-255 range to 0-1
- X_train /= 255
- X_test /= 255
- X_train[0]  # values are now floats in [0, 1]
- from tensorflow.keras.models import Sequential
- from tensorflow.keras.layers import Dense
- from tensorflow.keras import optimizers
- from tensorflow.keras.utils import to_categorical  # import explicitly rather than via wildcard
- n_class = 10
- y_train = to_categorical(y_train, n_class)  # one-hot encoding
- y_test = to_categorical(y_test, n_class)
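As a quick sanity check (my addition), a single label such as 3 becomes a length-10 vector with a 1 in position 3:
- to_categorical([3], n_class)  # array([[0., 0., 0., 1., 0., 0., 0., 0., 0., 0.]])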
- print('Preprocessing is done; the key part is next, only 5 commands to go')
- model = Sequential()
- model.add(Dense(64, activation='tanh', input_shape=(784, )))  # alternatives: sigmoid, relu
- model.add(Dense(10, activation='softmax'))  # 10-way output, one probability per digit
- model.compile(
-     loss='mean_squared_error',  # MSE; categorical_crossentropy is the more common pairing with softmax
-     optimizer=optimizers.SGD(learning_rate=0.01),  # stochastic gradient descent (SGD)
-     metrics=['accuracy']
- )
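Before training it can help to print the architecture; `model.summary()` (not one of the original five commands) lists each layer and its parameter count:
- model.summary()  # Dense(64): 784*64+64 = 50,240 params; Dense(10): 64*10+10 = 650 params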
- model.fit(
-     X_train, y_train, batch_size=128, epochs=200,
-     verbose=1,
-     validation_data=(X_test, y_test)  # the test set doubles as validation data here
- )
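After training, a minimal sketch (my addition, using standard Keras calls) of how one might score the model and predict a single digit:
- score = model.evaluate(X_test, y_test, verbose=0)
- print('test loss:', score[0], 'test accuracy:', score[1])
- np.argmax(model.predict(X_test[:1]), axis=1)  # predicted digit for the first test image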