1 | 1 | {
2 | 2 | "cells": [
| 3 | + {
| 4 | + "cell_type": "markdown",
| 5 | + "metadata": {},
| 6 | + "source": [
| 7 | + "# Net"
| 8 | + ]
| 9 | + },
3 | 10 | {
4 | 11 | "cell_type": "code",
5 | 12 | "execution_count": 1,
6 | | - "metadata": {
7 | | - "collapsed": true
8 | | - },
| 13 | + "metadata": {},
9 | 14 | "outputs": [],
10 | 15 | "source": [
11 | | - "import tensorflow as tf\n",
12 | | - "import numpy as np\n",
13 | | - "from tensorflow.examples.tutorials.mnist import input_data"
| 16 | + "import tensorflow as tf"
14 | 17 | ]
15 | 18 | },
16 | 19 | {
17 | 20 | "cell_type": "code",
18 | | - "execution_count": null,
19 | | - "metadata": {
20 | | - "collapsed": false
21 | | - },
22 | | - "outputs": [],
| 21 | + "execution_count": 2,
| 22 | + "metadata": {},
| 23 | + "outputs": [
| 24 | + {
| 25 | + "name": "stdout",
| 26 | + "output_type": "stream",
| 27 | + "text": [
| 28 | + "Populating the interactive namespace from numpy and matplotlib\n"
| 29 | + ]
| 30 | + }
| 31 | + ],
23 | 32 | "source": [
24 | | - "def init_weights(shape):\n",
25 | | - " return tf.Variable(tf.random_normal(shape, stddev=0.01))\n",
26 | | - "\n",
27 | | - "def model(X, w_h, w_o):\n",
28 | | - " h = tf.nn.sigmoid(tf.matmul(X, w_h)) # this is a basic mlp, think 2 stacked logistic regressions\n",
29 | | - " return tf.matmul(h, w_o) # note that we dont take the softmax at the end because our cost fn does that for us\n",
30 | | - "\n",
31 | | - "mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n",
32 | | - "trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels"
| 33 | + "%pylab inline"
33 | 34 | ]
34 | 35 | },
35 | 36 | {
36 | 37 | "cell_type": "code",
37 | | - "execution_count": 5,
38 | | - "metadata": {
39 | | - "collapsed": false
40 | | - },
| 38 | + "execution_count": 3,
| 39 | + "metadata": {},
41 | 40 | "outputs": [],
42 | 41 | "source": [
43 | | - "X = tf.placeholder(\"float\", [None, 784])\n",
44 | | - "Y = tf.placeholder(\"float\", [None, 10])\n",
45 | | - "\n",
46 | | - "w_h = init_weights([784, 625]) # create symbolic variables\n",
47 | | - "w_o = init_weights([625, 10])\n",
48 | | - "\n",
49 | | - "py_x = model(X, w_h, w_o)\n",
50 | | - "\n",
51 | | - "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y)) # compute costs\n",
52 | | - "train_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost) # construct an optimizer\n",
53 | | - "predict_op = tf.argmax(py_x, 1)"
| 42 | + "(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n",
| 43 | + "x_train = x_train / 255\n",
| 44 | + "x_test = x_test / 255"
54 | 45 | ]
55 | 46 | },
56 | 47 | {
57 | 48 | "cell_type": "code",
58 | | - "execution_count": null,
59 | | - "metadata": {
60 | | - "collapsed": false
61 | | - },
| 49 | + "execution_count": 4,
| 50 | + "metadata": {},
62 | 51 | "outputs": [],
63 | 52 | "source": [
64 | | - "# Launch the graph in a session\n",
65 | | - "with tf.Session() as sess:\n",
66 | | - " # you need to initialize all variables\n",
67 | | - " tf.global_variables_initializer().run()\n",
68 | | - "\n",
69 | | - " for i in range(100):\n",
70 | | - " for start, end in zip(range(0, len(trX), 128), range(128, len(trX)+1, 128)):\n",
71 | | - " sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end]})\n",
72 | | - " print(i, np.mean(np.argmax(teY, axis=1) ==\n",
73 | | - " sess.run(predict_op, feed_dict={X: teX})))"
| 53 | + "model = tf.keras.models.Sequential([\n",
| 54 | + " tf.keras.layers.Flatten(),\n",
| 55 | + " tf.keras.layers.Dense(28*28, activation='sigmoid'),\n",
| 56 | + " tf.keras.layers.Dense(10, activation='softmax')\n",
| 57 | + "])"
74 | 58 | ]
75 | 59 | },
76 | 60 | {
77 | 61 | "cell_type": "code",
78 | | - "execution_count": null,
79 | | - "metadata": {
80 | | - "collapsed": true
81 | | - },
| 62 | + "execution_count": 5,
| 63 | + "metadata": {},
82 | 64 | "outputs": [],
83 | | - "source": []
| 65 | + "source": [
| 66 | + "model.compile(\n",
| 67 | + " optimizer=tf.keras.optimizers.SGD(lr=0.01),\n",
| 68 | + " loss=tf.keras.losses.sparse_categorical_crossentropy,\n",
| 69 | + " metrics=['accuracy']\n",
| 70 | + ")"
| 71 | + ]
| 72 | + },
| 73 | + {
| 74 | + "cell_type": "code",
| 75 | + "execution_count": 6,
| 76 | + "metadata": {},
| 77 | + "outputs": [
| 78 | + {
| 79 | + "name": "stdout",
| 80 | + "output_type": "stream",
| 81 | + "text": [
| 82 | + "Train on 60000 samples\n",
| 83 | + "Epoch 1/10\n",
| 84 | + "60000/60000 [==============================] - 5s 76us/sample - loss: 1.2824 - accuracy: 0.7118\n",
| 85 | + "Epoch 2/10\n",
| 86 | + "60000/60000 [==============================] - 5s 81us/sample - loss: 0.6083 - accuracy: 0.8557\n",
| 87 | + "Epoch 3/10\n",
| 88 | + "60000/60000 [==============================] - 5s 81us/sample - loss: 0.4741 - accuracy: 0.8762\n",
| 89 | + "Epoch 4/10\n",
| 90 | + "60000/60000 [==============================] - 5s 81us/sample - loss: 0.4186 - accuracy: 0.8864\n",
| 91 | + "Epoch 5/10\n",
| 92 | + "60000/60000 [==============================] - 5s 81us/sample - loss: 0.3876 - accuracy: 0.8923\n",
| 93 | + "Epoch 6/10\n",
| 94 | + "60000/60000 [==============================] - 5s 82us/sample - loss: 0.3678 - accuracy: 0.8968\n",
| 95 | + "Epoch 7/10\n",
| 96 | + "60000/60000 [==============================] - 5s 82us/sample - loss: 0.3537 - accuracy: 0.8989\n",
| 97 | + "Epoch 8/10\n",
| 98 | + "60000/60000 [==============================] - 5s 82us/sample - loss: 0.3430 - accuracy: 0.9012\n",
| 99 | + "Epoch 9/10\n",
| 100 | + "60000/60000 [==============================] - 5s 85us/sample - loss: 0.3345 - accuracy: 0.9047\n",
| 101 | + "Epoch 10/10\n",
| 102 | + "60000/60000 [==============================] - 5s 81us/sample - loss: 0.3274 - accuracy: 0.9058\n"
| 103 | + ]
| 104 | + }
| 105 | + ],
| 106 | + "source": [
| 107 | + "history = model.fit(x_train, y_train, epochs=10)"
| 108 | + ]
84 | 109 | }
85 | 110 | ],
86 | 111 | "metadata": {
|
|
92 | 117 | "language_info": {
93 | 118 | "codemirror_mode": {
94 | 119 | "name": "ipython",
95 | | - "version": 2
| 120 | + "version": 3
96 | 121 | },
97 | 122 | "file_extension": ".py",
98 | 123 | "mimetype": "text/x-python",
99 | 124 | "name": "python",
100 | 125 | "nbconvert_exporter": "python",
101 | | - "pygments_lexer": "ipython2",
102 | | - "version": "2.7.13"
| 126 | + "pygments_lexer": "ipython3",
| 127 | + "version": "3.7.1"
103 | 128 | }
104 | 129 | },
105 | 130 | "nbformat": 4,
106 | | - "nbformat_minor": 0
| 131 | + "nbformat_minor": 1
107 | 132 | }
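
For reference, the cells added in this diff read end-to-end as the plain-Python sketch below. Everything except the final evaluation step is taken directly from the added lines; the closing model.evaluate call on the normalized test split is an assumption, since the diff loads x_test and y_test but never uses them.

import tensorflow as tf

# Load MNIST and scale pixel values to [0, 1]
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train / 255
x_test = x_test / 255

# Flatten the 28x28 images, one sigmoid hidden layer, softmax output over 10 digits
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(28 * 28, activation='sigmoid'),
    tf.keras.layers.Dense(10, activation='softmax')
])

# Labels are integers, so sparse categorical cross-entropy (no one-hot encoding needed);
# 'lr' is the argument name used in the notebook, newer TF releases spell it 'learning_rate'
model.compile(
    optimizer=tf.keras.optimizers.SGD(lr=0.01),
    loss=tf.keras.losses.sparse_categorical_crossentropy,
    metrics=['accuracy']
)

history = model.fit(x_train, y_train, epochs=10)

# Assumption: evaluate on the held-out test split (not part of the diff)
test_loss, test_acc = model.evaluate(x_test, y_test)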