
Commit 309ce61

Update notebook 3

1 parent 4997b41

2 files changed: +84, -59 lines

03_net.ipynb (+83, -58)
@@ -1,86 +1,111 @@
 {
  "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Net"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": 1,
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
-    "import tensorflow as tf\n",
-    "import numpy as np\n",
-    "from tensorflow.examples.tutorials.mnist import input_data"
+    "import tensorflow as tf"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Populating the interactive namespace from numpy and matplotlib\n"
+     ]
+    }
+   ],
    "source": [
-    "def init_weights(shape):\n",
-    "    return tf.Variable(tf.random_normal(shape, stddev=0.01))\n",
-    "\n",
-    "def model(X, w_h, w_o):\n",
-    "    h = tf.nn.sigmoid(tf.matmul(X, w_h)) # this is a basic mlp, think 2 stacked logistic regressions\n",
-    "    return tf.matmul(h, w_o) # note that we dont take the softmax at the end because our cost fn does that for us\n",
-    "\n",
-    "mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n",
-    "trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels"
+    "%pylab inline"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {
-    "collapsed": false
-   },
+   "execution_count": 3,
+   "metadata": {},
    "outputs": [],
    "source": [
-    "X = tf.placeholder(\"float\", [None, 784])\n",
-    "Y = tf.placeholder(\"float\", [None, 10])\n",
-    "\n",
-    "w_h = init_weights([784, 625]) # create symbolic variables\n",
-    "w_o = init_weights([625, 10])\n",
-    "\n",
-    "py_x = model(X, w_h, w_o)\n",
-    "\n",
-    "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y)) # compute costs\n",
-    "train_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost) # construct an optimizer\n",
-    "predict_op = tf.argmax(py_x, 1)"
+    "(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n",
+    "x_train = x_train / 255\n",
+    "x_test = x_test / 255"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "execution_count": 4,
+   "metadata": {},
    "outputs": [],
    "source": [
-    "# Launch the graph in a session\n",
-    "with tf.Session() as sess:\n",
-    "    # you need to initialize all variables\n",
-    "    tf.global_variables_initializer().run()\n",
-    "\n",
-    "    for i in range(100):\n",
-    "        for start, end in zip(range(0, len(trX), 128), range(128, len(trX)+1, 128)):\n",
-    "            sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end]})\n",
-    "        print(i, np.mean(np.argmax(teY, axis=1) ==\n",
-    "                         sess.run(predict_op, feed_dict={X: teX})))"
+    "model = tf.keras.models.Sequential([\n",
+    "    tf.keras.layers.Flatten(),\n",
+    "    tf.keras.layers.Dense(28*28, activation='sigmoid'),\n",
+    "    tf.keras.layers.Dense(10, activation='softmax')\n",
+    "])"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "execution_count": 5,
+   "metadata": {},
    "outputs": [],
-   "source": []
+   "source": [
+    "model.compile(\n",
+    "    optimizer=tf.keras.optimizers.SGD(lr=0.01),\n",
+    "    loss=tf.keras.losses.sparse_categorical_crossentropy,\n",
+    "    metrics=['accuracy']\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Train on 60000 samples\n",
+      "Epoch 1/10\n",
+      "60000/60000 [==============================] - 5s 76us/sample - loss: 1.2824 - accuracy: 0.7118\n",
+      "Epoch 2/10\n",
+      "60000/60000 [==============================] - 5s 81us/sample - loss: 0.6083 - accuracy: 0.8557\n",
+      "Epoch 3/10\n",
+      "60000/60000 [==============================] - 5s 81us/sample - loss: 0.4741 - accuracy: 0.8762\n",
+      "Epoch 4/10\n",
+      "60000/60000 [==============================] - 5s 81us/sample - loss: 0.4186 - accuracy: 0.8864\n",
+      "Epoch 5/10\n",
+      "60000/60000 [==============================] - 5s 81us/sample - loss: 0.3876 - accuracy: 0.8923\n",
+      "Epoch 6/10\n",
+      "60000/60000 [==============================] - 5s 82us/sample - loss: 0.3678 - accuracy: 0.8968\n",
+      "Epoch 7/10\n",
+      "60000/60000 [==============================] - 5s 82us/sample - loss: 0.3537 - accuracy: 0.8989\n",
+      "Epoch 8/10\n",
+      "60000/60000 [==============================] - 5s 82us/sample - loss: 0.3430 - accuracy: 0.9012\n",
+      "Epoch 9/10\n",
+      "60000/60000 [==============================] - 5s 85us/sample - loss: 0.3345 - accuracy: 0.9047\n",
+      "Epoch 10/10\n",
+      "60000/60000 [==============================] - 5s 81us/sample - loss: 0.3274 - accuracy: 0.9058\n"
+     ]
+    }
+   ],
+   "source": [
+    "history = model.fit(x_train, y_train, epochs=10)"
+   ]
   }
  ],
  "metadata": {
@@ -92,16 +117,16 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 2
+    "version": 3
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython2",
-   "version": "2.7.13"
+   "pygments_lexer": "ipython3",
+   "version": "3.7.1"
   }
  },
  "nbformat": 4,
- "nbformat_minor": 0
+ "nbformat_minor": 1
 }
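
For reference, here are the updated notebook's cells collected into a single standalone script: a minimal sketch, assuming TensorFlow 2.x, not part of the commit itself. The `%pylab inline` magic is notebook-only and is omitted (nothing below uses numpy or matplotlib directly); `learning_rate=` stands in for the deprecated `lr=` alias the notebook passes to `SGD`; and the closing `model.evaluate` call is an illustrative assumption, since the commit loads the test split but never scores it.

# Updated 03_net.ipynb as a script (sketch; assumes TensorFlow 2.x).
import tensorflow as tf

# Load MNIST and scale pixel values from [0, 255] to [0, 1].
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train / 255
x_test = x_test / 255

# Single-hidden-layer MLP: flatten the 28x28 images, one sigmoid hidden
# layer of 28*28 = 784 units, softmax output over the 10 digit classes.
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(28 * 28, activation='sigmoid'),
    tf.keras.layers.Dense(10, activation='softmax')
])

# Labels stay as integers, so the sparse cross-entropy loss applies; the old
# TF1 notebook one-hot encoded labels and used softmax_cross_entropy_with_logits.
model.compile(
    optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),  # notebook uses lr=0.01
    loss=tf.keras.losses.sparse_categorical_crossentropy,
    metrics=['accuracy']
)

history = model.fit(x_train, y_train, epochs=10)

# Not in the commit (assumption): score the held-out test split.
test_loss, test_acc = model.evaluate(x_test, y_test)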

README.md (+1, -1)
@@ -6,7 +6,7 @@ Introduction to deep learning based on Google's TensorFlow framework. A fork of
 * [Simple Multiplication](00_multiply.ipynb)
 * [Linear Regression](01_linear_regression.ipynb)
 * [Logistic Regression](02_logistic_regression.ipynb)
-* [Feedforward Neural Network (Multilayer Perceptron)](03_net.ipynb) (not yet updated)
+* [Feedforward Neural Network (Multilayer Perceptron)](03_net.ipynb)
 * [Deep Feedforward Neural Network (Multilayer Perceptron with 2 Hidden Layers O.o)](04_modern_net.ipynb) (not yet updated)
 * [Convolutional Neural Network](05_convolutional_net.ipynb) (not yet updated)
 * [Denoising Autoencoder](06_autoencoder.ipynb) (not yet updated)
