TensorFlow Sample Code
Revision | 89734daf58fc074503ecd51ed82614ec6068aa98 (tree)
---|---
Time | 2018-02-16 22:55:02
Author | hylom <hylom@hylo...>
Committer | hylom

add Deep Learning and CNN codes
@@ -0,0 +1,233 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import tensorflow as tf
import time

# Width, height, and number of channels of the input images
INPUT_WIDTH = 100
INPUT_HEIGHT = 100
INPUT_CHANNELS = 3
INPUT_SIZE = INPUT_WIDTH * INPUT_HEIGHT * INPUT_CHANNELS

# Parameters of the first convolution/pooling stage
CONV1_SIZE = 5               # size of the convolution filter
CONV1_STRIDE = [1, 1, 1, 1]  # stride of the convolution filter
CONV1_CHANNELS = 32          # number of output channels of the convolution layer
POOL1_SIZE = [1, 2, 2, 1]    # window size of the pooling layer
POOL1_STRIDE = [1, 2, 2, 1]  # pooling stride

# Parameters of the second convolution/pooling stage
CONV2_SIZE = 5               # size of the convolution filter
CONV2_STRIDE = [1, 1, 1, 1]  # stride of the convolution filter
CONV2_CHANNELS = 32          # number of output channels of the convolution layer
POOL2_SIZE = [1, 2, 2, 1]    # window size of the pooling layer
POOL2_STRIDE = [1, 2, 2, 1]  # pooling stride

# Size of the fully connected layer
W5_SIZE = 25 * 25 * CONV2_CHANNELS

# Output size
OUTPUT_SIZE = 3
LABEL_SIZE = OUTPUT_SIZE

TEACH_FILES = ["../data2/teach_cat.tfrecord",
               "../data2/teach_dog.tfrecord",
               "../data2/teach_monkey.tfrecord"]
TEST_FILES = ["../data2/test_cat.tfrecord",
              "../data2/test_dog.tfrecord",
              "../data2/test_monkey.tfrecord"]
MODEL_FILE = "./cnn_model"


# Fix the random seed so that runs are reproducible
tf.set_random_seed(1111)

## Define the inputs and the computation graph
with tf.variable_scope('model') as scope:

    # Placeholders for the input (= first layer) and the correct labels
    x1 = tf.placeholder(dtype=tf.float32, name="x1")
    y = tf.placeholder(dtype=tf.float32, name="y")

    # Second layer (convolution)
    W1 = tf.get_variable("W1",
                         shape=[CONV1_SIZE, CONV1_SIZE, INPUT_CHANNELS, CONV1_CHANNELS],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))

    b1 = tf.get_variable("b1",
                         shape=[CONV1_CHANNELS],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    u1 = tf.nn.bias_add(tf.nn.conv2d(x1, W1, CONV1_STRIDE, "SAME"), b1, name="u1")
    x2 = tf.nn.relu(u1, name="x2")

    # Third layer (pooling)
    x3 = tf.nn.max_pool(x2, POOL1_SIZE, POOL1_STRIDE, "SAME", name="x3")

    # Fourth layer (convolution)
    W3 = tf.get_variable("W3",
                         shape=[CONV2_SIZE, CONV2_SIZE, CONV1_CHANNELS, CONV2_CHANNELS],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))

    b3 = tf.get_variable("b3",
                         shape=[CONV2_CHANNELS],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    u3 = tf.nn.bias_add(tf.nn.conv2d(x3, W3, CONV2_STRIDE, "SAME"), b3, name="u3")
    x4 = tf.nn.relu(u3, name="x4")

    # Fifth layer (pooling)
    x5 = tf.nn.max_pool(x4, POOL2_SIZE, POOL2_STRIDE, "SAME", name="x5")

    # Sixth layer (output)
    W5 = tf.get_variable("W5",
                         shape=[W5_SIZE, OUTPUT_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    b5 = tf.get_variable("b5",
                         shape=[OUTPUT_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    x5_ = tf.reshape(x5, [-1, W5_SIZE], name="x5_")
    x6 = tf.nn.softmax(tf.matmul(x5_, W5) + b5, name="x6")

    # Cost function
    cross_entropy = -tf.reduce_sum(y * tf.log(x6), name="cross_entropy")
    tf.summary.scalar('cross_entropy', cross_entropy)

    # Accuracy
    correct = tf.equal(tf.argmax(x6, 1), tf.argmax(y, 1), name="correct")
    accuracy = tf.reduce_mean(tf.cast(correct, "float"), name="accuracy")
    tf.summary.scalar('accuracy', accuracy)


    # Define the optimization algorithm
    global_step = tf.Variable(0, name='global_step', trainable=False)
    optimizer = tf.train.AdamOptimizer(1e-4, name="optimizer")
    minimize = optimizer.minimize(cross_entropy, global_step=global_step, name="minimize")

    # Prepare an object that saves the training results
    saver = tf.train.Saver()

# Function that converts the loaded records
def map_dataset(serialized):
    features = {
        'label': tf.FixedLenFeature([], tf.int64),
        'height': tf.FixedLenFeature([], tf.int64),
        'width': tf.FixedLenFeature([], tf.int64),
        'raw_image': tf.FixedLenFeature([INPUT_SIZE], tf.float32),
    }
    parsed = tf.parse_single_example(serialized, features)

    # Convert the parsed data: turn the integer label into a one-hot vector
    raw_label = tf.cast(parsed['label'], tf.int32)
    label = tf.reshape(tf.slice(tf.eye(LABEL_SIZE),
                                [raw_label, 0],
                                [1, LABEL_SIZE]),
                       [LABEL_SIZE])

    image = tf.reshape(parsed['raw_image'], tf.stack([parsed['height'], parsed['width'], 3]))
    return (image, label, raw_label)

## Load the dataset
# 200 records per class x 3 classes = 600 records in total
dataset = tf.data.TFRecordDataset(TEACH_FILES)\
    .map(map_dataset)\
    .batch(600)

# Create an iterator for accessing the data
iterator = dataset.make_one_shot_iterator()
item = iterator.get_next()

# Create a session
sess = tf.Session()

# Initialize the variables
sess.run(tf.global_variables_initializer())

# Check whether a file with saved training results exists,
# and restore it if it does
latest_filename = tf.train.latest_checkpoint("./")
if latest_filename:
    print("load saved model {}".format(latest_filename))
    saver.restore(sess, latest_filename)

# Operations for collecting summaries
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter('data', graph=sess.graph)

# Read the training data
(dataset_x, dataset_y, values_y) = sess.run(item)


## Load the test dataset
# 50 x 3 = 150 test records
dataset2 = tf.data.TFRecordDataset(TEST_FILES)\
    .map(map_dataset)\
    .batch(150)
iterator2 = dataset2.make_one_shot_iterator()
item2 = iterator2.get_next()
(testdataset_x, testdataset_y, testvalues_y) = sess.run(item2)

test_summary = tf.summary.scalar('test_result', accuracy)

steps = tf.train.global_step(sess, global_step)
if steps == 0:
    # Record the initial state
    xe, acc, summary = sess.run([cross_entropy, accuracy, summary_op],
                                {x1: dataset_x, y: dataset_y})
    print("CROSS ENTROPY({}): {}".format(0, xe))
    print(" ACCURACY({}): {}".format(0, acc))
    summary_writer.add_summary(summary, global_step=0)

# Start training
start_time = time.time()
for i in range(30):
    for j in range(10):
        sess.run(minimize, {x1: dataset_x, y: dataset_y})
    # Fetch and record the intermediate results
    xe, acc, summary = sess.run([cross_entropy, accuracy, summary_op],
                                {x1: dataset_x, y: dataset_y})
    acc2, summary2 = sess.run([accuracy, test_summary],
                              {x1: testdataset_x, y: testdataset_y})
    print("CROSS ENTROPY({}): {}".format(steps + 10 * (i+1), xe))
    print(" ACCURACY({}): {}".format(steps + 10 * (i+1), acc))
    print(" TEST RESULT({}): {}".format(steps + 10 * (i+1), acc2))
    summary_writer.add_summary(summary, global_step=tf.train.global_step(sess, global_step))
    summary_writer.add_summary(summary2, global_step=tf.train.global_step(sess, global_step))

# Training finished
print("time: {} sec".format(time.time() - start_time))

save_path = saver.save(sess, MODEL_FILE)
print("Model saved to {}".format(save_path))

## Output the results

# Compute the accuracy when the model is fed
# the data it was trained on
print("----result with teaching data----")

print("assumed label:")
print(sess.run(tf.argmax(x6, 1), {x1: dataset_x}))
print("real label:")
print(sess.run(tf.argmax(y, 1), {y: dataset_y}))
print("accuracy:", sess.run(accuracy, {x1: dataset_x, y: dataset_y}))


# Compute the accuracy when the model is fed
# the test data
print("----result with test data----")


# Print the accuracy
print("assumed label:")
print(sess.run(tf.argmax(x6, 1), {x1: testdataset_x}))
print("real label:")
print(sess.run(tf.argmax(y, 1), {y: testdataset_y}))
print("accuracy:", sess.run(accuracy, {x1: testdataset_x, y: testdataset_y}))

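Note: W5_SIZE = 25 * 25 * CONV2_CHANNELS above follows from the pooling arithmetic: each SAME-padded 2x2 max-pool with stride 2 halves the spatial dimensions, so the 100x100 input becomes 50x50 and then 25x25 before it is flattened for the output layer. A minimal standalone sketch (not part of the commit) confirming the shape:

import tensorflow as tf

# Two SAME-padded 2x2, stride-2 max-pools halve each spatial dimension twice:
# 100 -> 50 -> 25, so the flattened size is 25 * 25 * 32 = 20000.
x = tf.zeros([1, 100, 100, 32])
pooled = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
pooled = tf.nn.max_pool(pooled, [1, 2, 2, 1], [1, 2, 2, 1], "SAME")
print(pooled.shape)  # (1, 25, 25, 32)
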
@@ -0,0 +1,243 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import tensorflow as tf
import time

# Width, height, and number of channels of the input images
INPUT_WIDTH = 100
INPUT_HEIGHT = 100
INPUT_CHANNELS = 3
INPUT_SIZE = INPUT_WIDTH * INPUT_HEIGHT * INPUT_CHANNELS

# Parameters of the first convolution/pooling stage
CONV1_SIZE = 5               # size of the convolution filter
CONV1_STRIDE = [1, 1, 1, 1]  # stride of the convolution filter
CONV1_CHANNELS = 32          # number of output channels of the convolution layer
POOL1_SIZE = [1, 2, 2, 1]    # window size of the pooling layer
POOL1_STRIDE = [1, 2, 2, 1]  # pooling stride

# Parameters of the second convolution/pooling stage
CONV2_SIZE = 5               # size of the convolution filter
CONV2_STRIDE = [1, 1, 1, 1]  # stride of the convolution filter
CONV2_CHANNELS = 32          # number of output channels of the convolution layer
POOL2_SIZE = [1, 2, 2, 1]    # window size of the pooling layer
POOL2_STRIDE = [1, 2, 2, 1]  # pooling stride

# Size of the fully connected layer
W5_SIZE = 25 * 25 * CONV2_CHANNELS

# Output size
OUTPUT_SIZE = 3
LABEL_SIZE = OUTPUT_SIZE

TEACH_FILES = ["../data2/teach_cat.tfrecord",
               "../data2/teach_dog.tfrecord",
               "../data2/teach_monkey.tfrecord"]
TEST_FILES = ["../data2/test_cat.tfrecord",
              "../data2/test_dog.tfrecord",
              "../data2/test_monkey.tfrecord"]
MODEL_FILE = "./cnn_model"


# Fix the random seed so that runs are reproducible
tf.set_random_seed(1111)

## Define the inputs and the computation graph
with tf.variable_scope('model') as scope:

    # Placeholders for the input (= first layer) and the correct labels
    x1 = tf.placeholder(dtype=tf.float32, name="x1")
    y = tf.placeholder(dtype=tf.float32, name="y")

    # Placeholder that switches dropout on and off
    enable_dropout = tf.placeholder_with_default(0.0, [], name="enable_dropout")

    # Constant 1.0 used to derive the keep probability
    prob_one = tf.constant(1.0, dtype=tf.float32)

    # If enable_dropout is 0, the keep probability is 1;
    # otherwise it is set to a fixed value
    x5_keep_prob = prob_one - enable_dropout * 0.5

    # Second layer (convolution)
    W1 = tf.get_variable("W1",
                         shape=[CONV1_SIZE, CONV1_SIZE, INPUT_CHANNELS, CONV1_CHANNELS],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))

    b1 = tf.get_variable("b1",
                         shape=[CONV1_CHANNELS],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    u1 = tf.nn.bias_add(tf.nn.conv2d(x1, W1, CONV1_STRIDE, "SAME"), b1, name="u1")
    x2 = tf.nn.relu(u1, name="x2")

    # Third layer (pooling)
    x3 = tf.nn.max_pool(x2, POOL1_SIZE, POOL1_STRIDE, "SAME", name="x3")

    # Fourth layer (convolution)
    W3 = tf.get_variable("W3",
                         shape=[CONV2_SIZE, CONV2_SIZE, CONV1_CHANNELS, CONV2_CHANNELS],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))

    b3 = tf.get_variable("b3",
                         shape=[CONV2_CHANNELS],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    u3 = tf.nn.bias_add(tf.nn.conv2d(x3, W3, CONV2_STRIDE, "SAME"), b3, name="u3")
    x4 = tf.nn.relu(u3, name="x4")

    # Fifth layer (pooling)
    x5 = tf.nn.max_pool(x4, POOL2_SIZE, POOL2_STRIDE, "SAME", name="x5")
    x5_ = tf.reshape(x5, [-1, W5_SIZE], name="x5_")
    x5_drop = tf.nn.dropout(x5_, x5_keep_prob, name="x5_drop")

    # Sixth layer (output)
    W5 = tf.get_variable("W5",
                         shape=[W5_SIZE, OUTPUT_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    b5 = tf.get_variable("b5",
                         shape=[OUTPUT_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    x6 = tf.nn.softmax(tf.matmul(x5_drop, W5) + b5, name="x6")

    # Cost function
    cross_entropy = -tf.reduce_sum(y * tf.log(x6), name="cross_entropy")
    tf.summary.scalar('cross_entropy', cross_entropy)

    # Accuracy
    correct = tf.equal(tf.argmax(x6, 1), tf.argmax(y, 1), name="correct")
    accuracy = tf.reduce_mean(tf.cast(correct, "float"), name="accuracy")
    tf.summary.scalar('accuracy', accuracy)


    # Define the optimization algorithm
    global_step = tf.Variable(0, name='global_step', trainable=False)
    optimizer = tf.train.AdamOptimizer(1e-4, name="optimizer")
    minimize = optimizer.minimize(cross_entropy, global_step=global_step, name="minimize")

    # Prepare an object that saves the training results
    saver = tf.train.Saver()

# Function that converts the loaded records
def map_dataset(serialized):
    features = {
        'label': tf.FixedLenFeature([], tf.int64),
        'height': tf.FixedLenFeature([], tf.int64),
        'width': tf.FixedLenFeature([], tf.int64),
        'raw_image': tf.FixedLenFeature([INPUT_SIZE], tf.float32),
    }
    parsed = tf.parse_single_example(serialized, features)

    # Convert the parsed data: turn the integer label into a one-hot vector
    raw_label = tf.cast(parsed['label'], tf.int32)
    label = tf.reshape(tf.slice(tf.eye(LABEL_SIZE),
                                [raw_label, 0],
                                [1, LABEL_SIZE]),
                       [LABEL_SIZE])

    image = tf.reshape(parsed['raw_image'], tf.stack([parsed['height'], parsed['width'], 3]))
    return (image, label, raw_label)

## Load the dataset
# 200 records per class x 3 classes = 600 records in total
dataset = tf.data.TFRecordDataset(TEACH_FILES)\
    .map(map_dataset)\
    .batch(600)

# Create an iterator for accessing the data
iterator = dataset.make_one_shot_iterator()
item = iterator.get_next()

# Create a session
sess = tf.Session()

# Initialize the variables
sess.run(tf.global_variables_initializer())

# Check whether a file with saved training results exists,
# and restore it if it does
latest_filename = tf.train.latest_checkpoint("./")
if latest_filename:
    print("load saved model {}".format(latest_filename))
    saver.restore(sess, latest_filename)

# Operations for collecting summaries
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter('data', graph=sess.graph)

# Read the training data
(dataset_x, dataset_y, values_y) = sess.run(item)


## Load the test dataset
# 50 x 3 = 150 test records
dataset2 = tf.data.TFRecordDataset(TEST_FILES)\
    .map(map_dataset)\
    .batch(150)
iterator2 = dataset2.make_one_shot_iterator()
item2 = iterator2.get_next()
(testdataset_x, testdataset_y, testvalues_y) = sess.run(item2)

test_summary = tf.summary.scalar('test_result', accuracy)

steps = tf.train.global_step(sess, global_step)
if steps == 0:
    # Record the initial state
    xe, acc, summary = sess.run([cross_entropy, accuracy, summary_op],
                                {x1: dataset_x, y: dataset_y})
    print("CROSS ENTROPY({}): {}".format(0, xe))
    print(" ACCURACY({}): {}".format(0, acc))
    summary_writer.add_summary(summary, global_step=0)

# Start training
start_time = time.time()
for i in range(30):
    for j in range(10):
        sess.run(minimize, {x1: dataset_x, y: dataset_y, enable_dropout: 1.0})
    # Fetch and record the intermediate results
    xe, acc, summary = sess.run([cross_entropy, accuracy, summary_op],
                                {x1: dataset_x, y: dataset_y})
    acc2, summary2 = sess.run([accuracy, test_summary],
                              {x1: testdataset_x, y: testdataset_y})
    print("CROSS ENTROPY({}): {}".format(steps + 10 * (i+1), xe))
    print(" ACCURACY({}): {}".format(steps + 10 * (i+1), acc))
    print(" TEST RESULT({}): {}".format(steps + 10 * (i+1), acc2))
    summary_writer.add_summary(summary, global_step=tf.train.global_step(sess, global_step))
    summary_writer.add_summary(summary2, global_step=tf.train.global_step(sess, global_step))

# Training finished
print("time: {} sec".format(time.time() - start_time))

save_path = saver.save(sess, MODEL_FILE)
print("Model saved to {}".format(save_path))

## Output the results

# Compute the accuracy when the model is fed
# the data it was trained on
print("----result with teaching data----")

print("assumed label:")
print(sess.run(tf.argmax(x6, 1), {x1: dataset_x}))
print("real label:")
print(sess.run(tf.argmax(y, 1), {y: dataset_y}))
print("accuracy:", sess.run(accuracy, {x1: dataset_x, y: dataset_y}))


# Compute the accuracy when the model is fed
# the test data
print("----result with test data----")


# Print the accuracy
print("assumed label:")
print(sess.run(tf.argmax(x6, 1), {x1: testdataset_x}))
print("real label:")
print(sess.run(tf.argmax(y, 1), {y: testdataset_y}))
print("accuracy:", sess.run(accuracy, {x1: testdataset_x, y: testdataset_y}))

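Note: the dropout switch above works because tf.placeholder_with_default evaluates to 0.0 whenever nothing is fed, so the keep probability stays at 1.0 (dropout off) during evaluation, while feeding enable_dropout: 1.0 in the training step lowers it to 0.5. A standalone sketch (not part of the commit) of just that mechanism:

import tensorflow as tf

enable_dropout = tf.placeholder_with_default(0.0, [], name="enable_dropout")
keep_prob = tf.constant(1.0) - enable_dropout * 0.5

with tf.Session() as sess:
    print(sess.run(keep_prob))                         # 1.0 (default: dropout off)
    print(sess.run(keep_prob, {enable_dropout: 1.0}))  # 0.5 (training)
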
@@ -0,0 +1,249 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import tensorflow as tf
import time

# Width, height, and number of channels of the input images
INPUT_WIDTH = 100
INPUT_HEIGHT = 100
INPUT_CHANNELS = 3
INPUT_SIZE = INPUT_WIDTH * INPUT_HEIGHT * INPUT_CHANNELS

# Parameters of the first convolution/pooling stage
CONV1_SIZE = 5               # size of the convolution filter
CONV1_STRIDE = [1, 1, 1, 1]  # stride of the convolution filter
CONV1_CHANNELS = 32          # number of output channels of the convolution layer
POOL1_SIZE = [1, 2, 2, 1]    # window size of the pooling layer
POOL1_STRIDE = [1, 2, 2, 1]  # pooling stride

# Parameters of the second convolution/pooling stage
CONV2_SIZE = 5               # size of the convolution filter
CONV2_STRIDE = [1, 1, 1, 1]  # stride of the convolution filter
CONV2_CHANNELS = 32          # number of output channels of the convolution layer
POOL2_SIZE = [1, 2, 2, 1]    # window size of the pooling layer
POOL2_STRIDE = [1, 2, 2, 1]  # pooling stride

# Size of the fully connected layer
W5_SIZE = 25 * 25 * CONV2_CHANNELS

# Output size
OUTPUT_SIZE = 3
LABEL_SIZE = OUTPUT_SIZE

TEACH_FILES = ["../data2/teach_cat.tfrecord",
               "../data2/teach_dog.tfrecord",
               "../data2/teach_monkey.tfrecord"]
TEST_FILES = ["../data2/test_cat.tfrecord",
              "../data2/test_dog.tfrecord",
              "../data2/test_monkey.tfrecord"]
MODEL_FILE = "./cnn_model"


# Fix the random seed so that runs are reproducible
tf.set_random_seed(1111)

## Define the inputs and the computation graph
with tf.variable_scope('model') as scope:

    # Placeholders for the input (= first layer) and the correct labels
    x1 = tf.placeholder(dtype=tf.float32, name="x1")
    y = tf.placeholder(dtype=tf.float32, name="y")

    # Placeholder that switches dropout on and off
    enable_dropout = tf.placeholder_with_default(0.0, [], name="enable_dropout")

    # Constant 1.0 used to derive the keep probability
    prob_one = tf.constant(1.0, dtype=tf.float32)

    # If enable_dropout is 0, the keep probability is 1;
    # otherwise it is set to a fixed value
    x5_keep_prob = prob_one - enable_dropout * 0.5

    # Second layer (convolution)
    W1 = tf.get_variable("W1",
                         shape=[CONV1_SIZE, CONV1_SIZE, INPUT_CHANNELS, CONV1_CHANNELS],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))

    b1 = tf.get_variable("b1",
                         shape=[CONV1_CHANNELS],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    u1 = tf.nn.bias_add(tf.nn.conv2d(x1, W1, CONV1_STRIDE, "SAME"), b1, name="u1")
    x2 = tf.nn.relu(u1, name="x2")

    # Third layer (pooling)
    x3 = tf.nn.max_pool(x2, POOL1_SIZE, POOL1_STRIDE, "SAME", name="x3")

    # Fourth layer (convolution)
    W3 = tf.get_variable("W3",
                         shape=[CONV2_SIZE, CONV2_SIZE, CONV1_CHANNELS, CONV2_CHANNELS],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))

    b3 = tf.get_variable("b3",
                         shape=[CONV2_CHANNELS],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    u3 = tf.nn.bias_add(tf.nn.conv2d(x3, W3, CONV2_STRIDE, "SAME"), b3, name="u3")
    x4 = tf.nn.relu(u3, name="x4")

    # Fifth layer (pooling)
    x5 = tf.nn.max_pool(x4, POOL2_SIZE, POOL2_STRIDE, "SAME", name="x5")
    x5_ = tf.reshape(x5, [-1, W5_SIZE], name="x5_")
    x5_drop = tf.nn.dropout(x5_, x5_keep_prob, name="x5_drop")

    # Sixth layer (output)
    W5 = tf.get_variable("W5",
                         shape=[W5_SIZE, OUTPUT_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    b5 = tf.get_variable("b5",
                         shape=[OUTPUT_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    x6 = tf.nn.softmax(tf.matmul(x5_drop, W5) + b5, name="x6")

    # Cost function
    cross_entropy = -tf.reduce_sum(y * tf.log(x6), name="cross_entropy")
    tf.summary.scalar('cross_entropy', cross_entropy)

    # Accuracy
    correct = tf.equal(tf.argmax(x6, 1), tf.argmax(y, 1), name="correct")
    accuracy = tf.reduce_mean(tf.cast(correct, "float"), name="accuracy")
    tf.summary.scalar('accuracy', accuracy)


    # Define the optimization algorithm
    global_step = tf.Variable(0, name='global_step', trainable=False)
    optimizer = tf.train.AdamOptimizer(1e-4, name="optimizer")
    minimize = optimizer.minimize(cross_entropy, global_step=global_step, name="minimize")

    # Prepare an object that saves the training results
    saver = tf.train.Saver()

# Function that converts the loaded records
def map_dataset(serialized):
    features = {
        'label': tf.FixedLenFeature([], tf.int64),
        'height': tf.FixedLenFeature([], tf.int64),
        'width': tf.FixedLenFeature([], tf.int64),
        'raw_image': tf.FixedLenFeature([INPUT_SIZE], tf.float32),
    }
    parsed = tf.parse_single_example(serialized, features)

    # Convert the parsed data: turn the integer label into a one-hot vector
    raw_label = tf.cast(parsed['label'], tf.int32)
    label = tf.reshape(tf.slice(tf.eye(LABEL_SIZE),
                                [raw_label, 0],
                                [1, LABEL_SIZE]),
                       [LABEL_SIZE])

    image = tf.reshape(parsed['raw_image'], tf.stack([parsed['height'], parsed['width'], 3]))
    return (image, label, raw_label)

## Load the dataset
# 200 records per class x 3 classes = 600 records in total
dataset_size = tf.placeholder(shape=[], dtype=tf.int64)
dataset = tf.data.TFRecordDataset(TEACH_FILES)\
    .map(map_dataset)\
    .repeat()\
    .shuffle(600)\
    .batch(dataset_size)

# Create an iterator for accessing the data
iterator = dataset.make_initializable_iterator()
next_dataset = iterator.get_next()

# Create a session
sess = tf.Session()

# Initialize the variables
sess.run(tf.global_variables_initializer())

# Check whether a file with saved training results exists,
# and restore it if it does
latest_filename = tf.train.latest_checkpoint("./")
if latest_filename:
    print("load saved model {}".format(latest_filename))
    saver.restore(sess, latest_filename)

# Operations for collecting summaries
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter('data', graph=sess.graph)

# Read out the training data in advance, for computing the accuracy on the teaching data
sess.run(iterator.initializer, {dataset_size: 600})
(dataset_all_x, dataset_all_y, values_all_y) = sess.run(next_dataset)


## Load the test dataset
# 50 x 3 = 150 test records
dataset2 = tf.data.TFRecordDataset(TEST_FILES)\
    .map(map_dataset)\
    .batch(150)
iterator2 = dataset2.make_one_shot_iterator()
item2 = iterator2.get_next()
(testdataset_x, testdataset_y, testvalues_y) = sess.run(item2)

test_summary = tf.summary.scalar('test_result', accuracy)

steps = tf.train.global_step(sess, global_step)
if steps == 0:
    # Record the initial state
    xe, acc, summary = sess.run([cross_entropy, accuracy, summary_op],
                                {x1: dataset_all_x, y: dataset_all_y})
    print("CROSS ENTROPY({}): {}".format(0, xe))
    print(" ACCURACY({}): {}".format(0, acc))
    summary_writer.add_summary(summary, global_step=0)

# Start training
start_time = time.time()
sess.run(iterator.initializer, {dataset_size: 100})
for i in range(90):
    for j in range(10):
        (dataset_x, dataset_y, values_y) = sess.run(next_dataset)
        sess.run(minimize, {x1: dataset_x, y: dataset_y, enable_dropout: 1.0})
    # Fetch and record the intermediate results
    xe, acc, summary = sess.run([cross_entropy, accuracy, summary_op],
                                {x1: dataset_all_x, y: dataset_all_y})
    acc2, summary2 = sess.run([accuracy, test_summary],
                              {x1: testdataset_x, y: testdataset_y})
    print("CROSS ENTROPY({}): {}".format(steps + 10 * (i+1), xe))
    print(" ACCURACY({}): {}".format(steps + 10 * (i+1), acc))
    print(" TEST RESULT({}): {}".format(steps + 10 * (i+1), acc2))
    summary_writer.add_summary(summary, global_step=tf.train.global_step(sess, global_step))
    summary_writer.add_summary(summary2, global_step=tf.train.global_step(sess, global_step))

# Training finished
print("time: {} sec".format(time.time() - start_time))

save_path = saver.save(sess, MODEL_FILE)
print("Model saved to {}".format(save_path))

## Output the results

# Compute the accuracy when the model is fed
# the data it was trained on
print("----result with teaching data----")

print("assumed label:")
print(sess.run(tf.argmax(x6, 1), {x1: dataset_all_x}))
print("real label:")
print(sess.run(tf.argmax(y, 1), {y: dataset_all_y}))
print("accuracy:", sess.run(accuracy, {x1: dataset_all_x, y: dataset_all_y}))


# Compute the accuracy when the model is fed
# the test data
print("----result with test data----")


# Print the accuracy
print("assumed label:")
print(sess.run(tf.argmax(x6, 1), {x1: testdataset_x}))
print("real label:")
print(sess.run(tf.argmax(y, 1), {y: testdataset_y}))
print("accuracy:", sess.run(accuracy, {x1: testdataset_x, y: testdataset_y}))

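Note: this variant re-initializes a single iterator with different batch sizes: once with 600 to read the whole teaching set for evaluation, then with 100 to draw shuffled minibatches during training. The same pattern in isolation, as a standalone sketch on a toy dataset (not part of the commit):

import tensorflow as tf

batch_size = tf.placeholder(shape=[], dtype=tf.int64)
dataset = tf.data.Dataset.range(10).repeat().shuffle(10).batch(batch_size)
iterator = dataset.make_initializable_iterator()
next_batch = iterator.get_next()

with tf.Session() as sess:
    sess.run(iterator.initializer, {batch_size: 4})
    print(sess.run(next_batch))   # e.g. [7 2 9 0]
    sess.run(iterator.initializer, {batch_size: 10})
    print(sess.run(next_batch))   # all ten elements, shuffled
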
@@ -0,0 +1,203 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os.path
import tensorflow as tf
import time

INPUT_WIDTH = 100
INPUT_HEIGHT = 100
INPUT_CHANNELS = 3

INPUT_SIZE = INPUT_WIDTH * INPUT_HEIGHT * INPUT_CHANNELS
W1_SIZE = 100
W2_SIZE = 100
OUTPUT_SIZE = 3
LABEL_SIZE = OUTPUT_SIZE

TEACH_FILES = ["../data2/teach_cat.tfrecord",
               "../data2/teach_dog.tfrecord",
               "../data2/teach_monkey.tfrecord"]
TEST_FILES = ["../data2/test_cat.tfrecord",
              "../data2/test_dog.tfrecord",
              "../data2/test_monkey.tfrecord"]

MODEL_NAME = "./deep_model"

# Fix the random seed so that runs are reproducible
tf.set_random_seed(1111)

## Define the inputs and the computation graph
with tf.variable_scope('model') as scope:
    x1 = tf.placeholder(dtype=tf.float32)
    y = tf.placeholder(dtype=tf.float32)

    # Second layer
    W1 = tf.get_variable("W1",
                         shape=[INPUT_SIZE, W1_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    b1 = tf.get_variable("b1",
                         shape=[W1_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    x2 = tf.nn.relu(tf.matmul(x1, W1) + b1, name="x2")

    # Third layer
    W2 = tf.get_variable("W2",
                         shape=[W1_SIZE, W2_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    b2 = tf.get_variable("b2",
                         shape=[W2_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    x3 = tf.nn.relu(tf.matmul(x2, W2) + b2, name="x3")

    # Fourth layer
    W3 = tf.get_variable("W3",
                         shape=[W2_SIZE, OUTPUT_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    b3 = tf.get_variable("b3",
                         shape=[OUTPUT_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    x4 = tf.nn.softmax(tf.matmul(x3, W3) + b3, name="x4")

    # Cost function
    cross_entropy = -tf.reduce_sum(y * tf.log(x4), name="cross_entropy")
    tf.summary.scalar('cross_entropy', cross_entropy)

    # Accuracy
    # Check whether the index of the largest value in the output tensor
    # matches the correct label
    correct = tf.equal(tf.argmax(x4, 1), tf.argmax(y, 1), name="correct")
    accuracy = tf.reduce_mean(tf.cast(correct, "float"), name="accuracy")
    tf.summary.scalar('accuracy', accuracy)

    # Define the optimization algorithm
    global_step = tf.Variable(0, name='global_step', trainable=False)
    optimizer = tf.train.GradientDescentOptimizer(1e-6, name="optimizer")
    minimize = optimizer.minimize(cross_entropy, global_step=global_step, name="minimize")

    # Prepare an object that saves the training results
    saver = tf.train.Saver()


# Function that converts the loaded records
def map_dataset(serialized):
    features = {
        'label': tf.FixedLenFeature([], tf.int64),
        'height': tf.FixedLenFeature([], tf.int64),
        'width': tf.FixedLenFeature([], tf.int64),
        'raw_image': tf.FixedLenFeature([INPUT_SIZE], tf.float32),
    }
    parsed = tf.parse_single_example(serialized, features)

    # Convert the parsed data: turn the integer label into a one-hot vector
    raw_label = tf.cast(parsed['label'], tf.int32)
    label = tf.reshape(tf.slice(tf.eye(LABEL_SIZE),
                                [raw_label, 0],
                                [1, LABEL_SIZE]),
                       [LABEL_SIZE])

    #image = tf.reshape(parsed['raw_image'], tf.stack([parsed['height'], parsed['width'], 3]))
    image = parsed['raw_image']
    return (image, label, raw_label)

## Load the dataset
# 200 records per class x 3 classes = 600 records in total
dataset = tf.data.TFRecordDataset(TEACH_FILES)\
    .map(map_dataset)\
    .batch(600)

# Create an iterator for accessing the data
iterator = dataset.make_one_shot_iterator()
item = iterator.get_next()

# Create a session
sess = tf.Session()

# Initialize the variables
sess.run(tf.global_variables_initializer())

# Check whether a file with saved training results exists,
# and restore it if it does
latest_filename = tf.train.latest_checkpoint("./")
if latest_filename:
    print("load saved model {}".format(latest_filename))
    saver.restore(sess, latest_filename)

# Operations for collecting summaries
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter('data', graph=sess.graph)

# Read the training data
(dataset_x, dataset_y, values_y) = sess.run(item)
steps = tf.train.global_step(sess, global_step)

## Load the test dataset
# 50 x 3 = 150 test records
dataset2 = tf.data.TFRecordDataset(TEST_FILES)\
    .map(map_dataset)\
    .batch(150)
iterator2 = dataset2.make_one_shot_iterator()
item2 = iterator2.get_next()
(testdataset_x, testdataset_y, testvalues_y) = sess.run(item2)

test_summary = tf.summary.scalar('test_result', accuracy)

if steps == 0:
    # Record the initial state
    xe, acc, summary = sess.run([cross_entropy, accuracy, summary_op], {x1: dataset_x, y: dataset_y})
    print("CROSS ENTROPY({}): {}".format(0, xe))
    print(" ACCURACY({}): {}".format(0, acc))
    summary_writer.add_summary(summary, global_step=0)

# Start training
start_time = time.time()
for i in range(30):
    for j in range(100):
        sess.run(minimize, {x1: dataset_x, y: dataset_y})

    # Fetch and record the intermediate results
    xe, acc, summary = sess.run([cross_entropy, accuracy, summary_op], {x1: dataset_x, y: dataset_y})
    acc2, summary2 = sess.run([accuracy, test_summary], {x1: testdataset_x, y: testdataset_y})
    print("CROSS ENTROPY({}): {}".format(steps + 100 * (i+1), xe))
    print(" ACCURACY({}): {}".format(steps + 100 * (i+1), acc))
    print(" TEST RESULT({}): {}".format(steps + 100 * (i+1), acc2))
    summary_writer.add_summary(summary, global_step=tf.train.global_step(sess, global_step))
    summary_writer.add_summary(summary2, global_step=tf.train.global_step(sess, global_step))

# Training finished
print("time: {} sec".format(time.time() - start_time))

save_path = saver.save(sess, MODEL_NAME, global_step=tf.train.global_step(sess, global_step))
print("Model saved to {}".format(save_path))

## Output the results

# Compute the accuracy when the model is fed
# the data it was trained on
print("----result with teaching data----")

print("assumed label:")
print(sess.run(tf.argmax(x4, 1), feed_dict={x1: dataset_x}))
print("real label:")
print(sess.run(tf.argmax(y, 1), feed_dict={y: dataset_y}))
print("accuracy:", sess.run(accuracy, feed_dict={x1: dataset_x, y: dataset_y}))


# Compute the accuracy when the model is fed
# the test data
print("----result with test data----")


# Print the accuracy
print("assumed label:")
print(sess.run(tf.argmax(x4, 1), feed_dict={x1: testdataset_x}))
print("real label:")
print(sess.run(tf.argmax(y, 1), feed_dict={y: testdataset_y}))
print("accuracy:", sess.run(accuracy, feed_dict={x1: testdataset_x, y: testdataset_y}))

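Note: every listing here computes cross entropy by hand as -tf.reduce_sum(y * tf.log(...)), which returns NaN once a softmax output underflows to exactly 0. A standalone sketch (not part of the commit) of the numerically safer built-in, which works on the pre-softmax logits and yields the same value:

import tensorflow as tf

logits = tf.constant([[2.0, 1.0, 0.1]])
labels = tf.constant([[1.0, 0.0, 0.0]])

# Hand-rolled cross entropy, as in the listings above
manual = -tf.reduce_sum(labels * tf.log(tf.nn.softmax(logits)))
# Built-in equivalent, computed from the logits for numerical stability
builtin = tf.reduce_sum(
    tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))

with tf.Session() as sess:
    print(sess.run([manual, builtin]))  # both approximately 0.417
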
@@ -0,0 +1,203 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os.path
import tensorflow as tf
import time

INPUT_WIDTH = 100
INPUT_HEIGHT = 100
INPUT_CHANNELS = 3

INPUT_SIZE = INPUT_WIDTH * INPUT_HEIGHT * INPUT_CHANNELS
W1_SIZE = 100
W2_SIZE = 100
OUTPUT_SIZE = 3
LABEL_SIZE = OUTPUT_SIZE

TEACH_FILES = ["../data2/teach_cat.tfrecord",
               "../data2/teach_dog.tfrecord",
               "../data2/teach_monkey.tfrecord"]
TEST_FILES = ["../data2/test_cat.tfrecord",
              "../data2/test_dog.tfrecord",
              "../data2/test_monkey.tfrecord"]

MODEL_NAME = "./deep_model"

# Fix the random seed so that runs are reproducible
tf.set_random_seed(1111)

## Define the inputs and the computation graph
with tf.variable_scope('model') as scope:
    x1 = tf.placeholder(dtype=tf.float32)
    y = tf.placeholder(dtype=tf.float32)

    # Second layer
    W1 = tf.get_variable("W1",
                         shape=[INPUT_SIZE, W1_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    b1 = tf.get_variable("b1",
                         shape=[W1_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    x2 = tf.nn.relu(tf.matmul(x1, W1) + b1, name="x2")

    # Third layer
    W2 = tf.get_variable("W2",
                         shape=[W1_SIZE, W2_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    b2 = tf.get_variable("b2",
                         shape=[W2_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    x3 = tf.nn.relu(tf.matmul(x2, W2) + b2, name="x3")

    # Fourth layer
    W3 = tf.get_variable("W3",
                         shape=[W2_SIZE, OUTPUT_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    b3 = tf.get_variable("b3",
                         shape=[OUTPUT_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    x4 = tf.nn.softmax(tf.matmul(x3, W3) + b3, name="x4")

    # Cost function
    cross_entropy = -tf.reduce_sum(y * tf.log(x4), name="cross_entropy")
    tf.summary.scalar('cross_entropy', cross_entropy)

    # Accuracy
    # Check whether the index of the largest value in the output tensor
    # matches the correct label
    correct = tf.equal(tf.argmax(x4, 1), tf.argmax(y, 1), name="correct")
    accuracy = tf.reduce_mean(tf.cast(correct, "float"), name="accuracy")
    tf.summary.scalar('accuracy', accuracy)

    # Define the optimization algorithm
    global_step = tf.Variable(0, name='global_step', trainable=False)
    optimizer = tf.train.AdamOptimizer(1e-4, name="optimizer")
    minimize = optimizer.minimize(cross_entropy, global_step=global_step, name="minimize")

    # Prepare an object that saves the training results
    saver = tf.train.Saver()


# Function that converts the loaded records
def map_dataset(serialized):
    features = {
        'label': tf.FixedLenFeature([], tf.int64),
        'height': tf.FixedLenFeature([], tf.int64),
        'width': tf.FixedLenFeature([], tf.int64),
        'raw_image': tf.FixedLenFeature([INPUT_SIZE], tf.float32),
    }
    parsed = tf.parse_single_example(serialized, features)

    # Convert the parsed data: turn the integer label into a one-hot vector
    raw_label = tf.cast(parsed['label'], tf.int32)
    label = tf.reshape(tf.slice(tf.eye(LABEL_SIZE),
                                [raw_label, 0],
                                [1, LABEL_SIZE]),
                       [LABEL_SIZE])

    #image = tf.reshape(parsed['raw_image'], tf.stack([parsed['height'], parsed['width'], 3]))
    image = parsed['raw_image']
    return (image, label, raw_label)

## Load the dataset
# 200 records per class x 3 classes = 600 records in total
dataset = tf.data.TFRecordDataset(TEACH_FILES)\
    .map(map_dataset)\
    .batch(600)

# Create an iterator for accessing the data
iterator = dataset.make_one_shot_iterator()
item = iterator.get_next()

# Create a session
sess = tf.Session()

# Initialize the variables
sess.run(tf.global_variables_initializer())

# Check whether a file with saved training results exists,
# and restore it if it does
latest_filename = tf.train.latest_checkpoint("./")
if latest_filename:
    print("load saved model {}".format(latest_filename))
    saver.restore(sess, latest_filename)

# Operations for collecting summaries
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter('data', graph=sess.graph)

# Read the training data
(dataset_x, dataset_y, values_y) = sess.run(item)
steps = tf.train.global_step(sess, global_step)

## Load the test dataset
# 50 x 3 = 150 test records
dataset2 = tf.data.TFRecordDataset(TEST_FILES)\
    .map(map_dataset)\
    .batch(150)
iterator2 = dataset2.make_one_shot_iterator()
item2 = iterator2.get_next()
(testdataset_x, testdataset_y, testvalues_y) = sess.run(item2)

test_summary = tf.summary.scalar('test_result', accuracy)

if steps == 0:
    # Record the initial state
    xe, acc, summary = sess.run([cross_entropy, accuracy, summary_op], {x1: dataset_x, y: dataset_y})
    print("CROSS ENTROPY({}): {}".format(0, xe))
    print(" ACCURACY({}): {}".format(0, acc))
    summary_writer.add_summary(summary, global_step=0)

# Start training
start_time = time.time()
for i in range(30):
    for j in range(100):
        sess.run(minimize, {x1: dataset_x, y: dataset_y})

    # Fetch and record the intermediate results
    xe, acc, summary = sess.run([cross_entropy, accuracy, summary_op], {x1: dataset_x, y: dataset_y})
    acc2, summary2 = sess.run([accuracy, test_summary], {x1: testdataset_x, y: testdataset_y})
    print("CROSS ENTROPY({}): {}".format(steps + 100 * (i+1), xe))
    print(" ACCURACY({}): {}".format(steps + 100 * (i+1), acc))
    print(" TEST RESULT({}): {}".format(steps + 100 * (i+1), acc2))
    summary_writer.add_summary(summary, global_step=tf.train.global_step(sess, global_step))
    summary_writer.add_summary(summary2, global_step=tf.train.global_step(sess, global_step))

# Training finished
print("time: {} sec".format(time.time() - start_time))

save_path = saver.save(sess, MODEL_NAME, global_step=tf.train.global_step(sess, global_step))
print("Model saved to {}".format(save_path))

## Output the results

# Compute the accuracy when the model is fed
# the data it was trained on
print("----result with teaching data----")

print("assumed label:")
print(sess.run(tf.argmax(x4, 1), feed_dict={x1: dataset_x}))
print("real label:")
print(sess.run(tf.argmax(y, 1), feed_dict={y: dataset_y}))
print("accuracy:", sess.run(accuracy, feed_dict={x1: dataset_x, y: dataset_y}))


# Compute the accuracy when the model is fed
# the test data
print("----result with test data----")


# Print the accuracy
print("assumed label:")
print(sess.run(tf.argmax(x4, 1), feed_dict={x1: testdataset_x}))
print("real label:")
print(sess.run(tf.argmax(y, 1), feed_dict={y: testdataset_y}))
print("accuracy:", sess.run(accuracy, feed_dict={x1: testdataset_x, y: testdataset_y}))

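Note: unlike the CNN samples, these deep-model listings pass global_step to saver.save(), so each save writes a step-suffixed checkpoint (deep_model-3000 and so on) that tf.train.latest_checkpoint("./") resolves on the next run. A standalone sketch of that naming behavior (not part of the commit; assumes the working directory is writable):

import tensorflow as tf

v = tf.Variable(1.0)
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Writes deep_model-300.index, deep_model-300.data-*, and a checkpoint file
    saver.save(sess, "./deep_model", global_step=300)
    print(tf.train.latest_checkpoint("./"))  # ./deep_model-300
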
@@ -0,0 +1,203 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os.path
import tensorflow as tf
import time

INPUT_WIDTH = 100
INPUT_HEIGHT = 100
INPUT_CHANNELS = 3

INPUT_SIZE = INPUT_WIDTH * INPUT_HEIGHT * INPUT_CHANNELS
W1_SIZE = 1000
W2_SIZE = 1000
OUTPUT_SIZE = 3
LABEL_SIZE = OUTPUT_SIZE

TEACH_FILES = ["../data2/teach_cat.tfrecord",
               "../data2/teach_dog.tfrecord",
               "../data2/teach_monkey.tfrecord"]
TEST_FILES = ["../data2/test_cat.tfrecord",
              "../data2/test_dog.tfrecord",
              "../data2/test_monkey.tfrecord"]

MODEL_NAME = "./deep_model"

# Fix the random seed so that runs are reproducible
tf.set_random_seed(1111)

## Define the inputs and the computation graph
with tf.variable_scope('model') as scope:
    x1 = tf.placeholder(dtype=tf.float32)
    y = tf.placeholder(dtype=tf.float32)

    # Second layer
    W1 = tf.get_variable("W1",
                         shape=[INPUT_SIZE, W1_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    b1 = tf.get_variable("b1",
                         shape=[W1_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    x2 = tf.nn.relu(tf.matmul(x1, W1) + b1, name="x2")

    # Third layer
    W2 = tf.get_variable("W2",
                         shape=[W1_SIZE, W2_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    b2 = tf.get_variable("b2",
                         shape=[W2_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    x3 = tf.nn.relu(tf.matmul(x2, W2) + b2, name="x3")

    # Fourth layer
    W3 = tf.get_variable("W3",
                         shape=[W2_SIZE, OUTPUT_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    b3 = tf.get_variable("b3",
                         shape=[OUTPUT_SIZE],
                         dtype=tf.float32,
                         initializer=tf.random_normal_initializer(stddev=0.01))
    x4 = tf.nn.softmax(tf.matmul(x3, W3) + b3, name="x4")

    # Cost function
    cross_entropy = -tf.reduce_sum(y * tf.log(x4), name="cross_entropy")
    tf.summary.scalar('cross_entropy', cross_entropy)

    # Accuracy
    # Check whether the index of the largest value in the output tensor
    # matches the correct label
    correct = tf.equal(tf.argmax(x4, 1), tf.argmax(y, 1), name="correct")
    accuracy = tf.reduce_mean(tf.cast(correct, "float"), name="accuracy")
    tf.summary.scalar('accuracy', accuracy)

    # Define the optimization algorithm
    global_step = tf.Variable(0, name='global_step', trainable=False)
    optimizer = tf.train.AdamOptimizer(1e-6, name="optimizer")
    minimize = optimizer.minimize(cross_entropy, global_step=global_step, name="minimize")

    # Prepare an object that saves the training results
    saver = tf.train.Saver()


# Function that converts the loaded records
def map_dataset(serialized):
    features = {
        'label': tf.FixedLenFeature([], tf.int64),
        'height': tf.FixedLenFeature([], tf.int64),
        'width': tf.FixedLenFeature([], tf.int64),
        'raw_image': tf.FixedLenFeature([INPUT_SIZE], tf.float32),
    }
    parsed = tf.parse_single_example(serialized, features)

    # Convert the parsed data: turn the integer label into a one-hot vector
    raw_label = tf.cast(parsed['label'], tf.int32)
    label = tf.reshape(tf.slice(tf.eye(LABEL_SIZE),
                                [raw_label, 0],
                                [1, LABEL_SIZE]),
                       [LABEL_SIZE])

    #image = tf.reshape(parsed['raw_image'], tf.stack([parsed['height'], parsed['width'], 3]))
    image = parsed['raw_image']
    return (image, label, raw_label)

## Load the dataset
# 200 records per class x 3 classes = 600 records in total
dataset = tf.data.TFRecordDataset(TEACH_FILES)\
    .map(map_dataset)\
    .batch(600)

# Create an iterator for accessing the data
iterator = dataset.make_one_shot_iterator()
item = iterator.get_next()

# Create a session
sess = tf.Session()

# Initialize the variables
sess.run(tf.global_variables_initializer())

# Check whether a file with saved training results exists,
# and restore it if it does
latest_filename = tf.train.latest_checkpoint("./")
if latest_filename:
    print("load saved model {}".format(latest_filename))
    saver.restore(sess, latest_filename)

# Operations for collecting summaries
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter('data', graph=sess.graph)

# Read the training data
(dataset_x, dataset_y, values_y) = sess.run(item)
steps = tf.train.global_step(sess, global_step)

## Load the test dataset
# 50 x 3 = 150 test records
dataset2 = tf.data.TFRecordDataset(TEST_FILES)\
    .map(map_dataset)\
    .batch(150)
iterator2 = dataset2.make_one_shot_iterator()
item2 = iterator2.get_next()
(testdataset_x, testdataset_y, testvalues_y) = sess.run(item2)

test_summary = tf.summary.scalar('test_result', accuracy)

if steps == 0:
    # Record the initial state
    xe, acc, summary = sess.run([cross_entropy, accuracy, summary_op], {x1: dataset_x, y: dataset_y})
    print("CROSS ENTROPY({}): {}".format(0, xe))
    print(" ACCURACY({}): {}".format(0, acc))
    summary_writer.add_summary(summary, global_step=0)

# Start training
start_time = time.time()
for i in range(30):
    for j in range(100):
        sess.run(minimize, {x1: dataset_x, y: dataset_y})

    # Fetch and record the intermediate results
    xe, acc, summary = sess.run([cross_entropy, accuracy, summary_op], {x1: dataset_x, y: dataset_y})
    acc2, summary2 = sess.run([accuracy, test_summary], {x1: testdataset_x, y: testdataset_y})
    print("CROSS ENTROPY({}): {}".format(steps + 100 * (i+1), xe))
    print(" ACCURACY({}): {}".format(steps + 100 * (i+1), acc))
    print(" TEST RESULT({}): {}".format(steps + 100 * (i+1), acc2))
    summary_writer.add_summary(summary, global_step=tf.train.global_step(sess, global_step))
    summary_writer.add_summary(summary2, global_step=tf.train.global_step(sess, global_step))

# Training finished
print("time: {} sec".format(time.time() - start_time))

save_path = saver.save(sess, MODEL_NAME, global_step=tf.train.global_step(sess, global_step))
print("Model saved to {}".format(save_path))

## Output the results

# Compute the accuracy when the model is fed
# the data it was trained on
print("----result with teaching data----")

print("assumed label:")
print(sess.run(tf.argmax(x4, 1), feed_dict={x1: dataset_x}))
print("real label:")
print(sess.run(tf.argmax(y, 1), feed_dict={y: dataset_y}))
print("accuracy:", sess.run(accuracy, feed_dict={x1: dataset_x, y: dataset_y}))


# Compute the accuracy when the model is fed
# the test data
print("----result with test data----")


# Print the accuracy
print("assumed label:")
print(sess.run(tf.argmax(x4, 1), feed_dict={x1: testdataset_x}))
print("real label:")
print(sess.run(tf.argmax(y, 1), feed_dict={y: testdataset_y}))
print("accuracy:", sess.run(accuracy, feed_dict={x1: testdataset_x, y: testdataset_y}))

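Note: map_dataset() in all of these listings builds the one-hot label by slicing a row out of an identity matrix. tf.one_hot produces the same vector in a single call; a standalone sketch (not part of the commit) comparing the two:

import tensorflow as tf

LABEL_SIZE = 3
raw_label = tf.constant(2, dtype=tf.int32)

# One-hot vector via identity-matrix slicing, as in map_dataset() above
eye_label = tf.reshape(tf.slice(tf.eye(LABEL_SIZE),
                                [raw_label, 0],
                                [1, LABEL_SIZE]),
                       [LABEL_SIZE])
# The dedicated op gives the same result
onehot_label = tf.one_hot(raw_label, LABEL_SIZE)

with tf.Session() as sess:
    print(sess.run([eye_label, onehot_label]))  # [0., 0., 1.] both times
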
@@ -0,0 +1,231 @@ | ||
1 | +#!/usr/bin/env python | |
2 | +# -*- coding: utf-8 -*- | |
3 | +import sys | |
4 | +import os.path | |
5 | +import tensorflow as tf | |
6 | +import time | |
7 | + | |
8 | +INPUT_WIDTH = 100 | |
9 | +INPUT_HEIGHT = 100 | |
10 | +INPUT_CHANNELS = 3 | |
11 | + | |
12 | +INPUT_SIZE = INPUT_WIDTH * INPUT_HEIGHT * INPUT_CHANNELS | |
13 | +W1_SIZE = 100 | |
14 | +W2_SIZE = 100 | |
15 | +OUTPUT_SIZE = 3 | |
16 | +LABEL_SIZE = OUTPUT_SIZE | |
17 | + | |
18 | +TEACH_FILES = ["../data2/teach_cat.tfrecord", | |
19 | + "../data2/teach_dog.tfrecord", | |
20 | + "../data2/teach_monkey.tfrecord"] | |
21 | +TEST_FILES = ["../data2/test_cat.tfrecord", | |
22 | + "../data2/test_dog.tfrecord", | |
23 | + "../data2/test_monkey.tfrecord"] | |
24 | + | |
25 | +MODEL_NAME = "./deep_model" | |
26 | + | |
27 | + | |
28 | +# 結果をそろえるために乱数の種を指定 | |
29 | +tf.set_random_seed(1111) | |
30 | + | |
31 | +## 入力と計算グラフを定義 | |
32 | +with tf.variable_scope('model') as scope: | |
33 | + x1 = tf.placeholder(dtype=tf.float32) | |
34 | + y = tf.placeholder(dtype=tf.float32) | |
35 | + | |
36 | + # ドロップアウト設定用のプレースホルダ | |
37 | + enable_dropout = tf.placeholder_with_default(0.0, [], name="enable_dropout") | |
38 | + | |
39 | + # ドロップアウト確率 | |
40 | + prob_one = tf.constant(1.0, dtype=tf.float32) | |
41 | + | |
42 | + # enable_dropoutが0の場合、キープ確率は1。そうでない場合、一定の確率に設定する | |
43 | + x1_keep_prob = prob_one - enable_dropout * 0.2 | |
44 | + x2_keep_prob = prob_one - enable_dropout * 0.5 | |
45 | + x3_keep_prob = prob_one - enable_dropout * 0.5 | |
46 | + | |
47 | + # 第1層のドロップアウトを設定 | |
48 | + x1_drop = tf.nn.dropout(x1, x1_keep_prob) | |
49 | + | |
50 | + # 第2層 | |
51 | + W1 = tf.get_variable("W1", | |
52 | + shape=[INPUT_SIZE, W1_SIZE], | |
53 | + dtype=tf.float32, | |
54 | + initializer=tf.random_normal_initializer(stddev=0.01)) | |
55 | + b1 = tf.get_variable("b1", | |
56 | + shape=[W1_SIZE], | |
57 | + dtype=tf.float32, | |
58 | + initializer=tf.random_normal_initializer(stddev=0.01)) | |
59 | + x2 = tf.nn.relu(tf.matmul(x1_drop, W1) + b1, name="x2") | |
60 | + | |
61 | + # 第2層のドロップアウトを設定 | |
62 | + x2_drop = tf.nn.dropout(x2, x2_keep_prob) | |
63 | + | |
64 | + # 第3層 | |
65 | + W2 = tf.get_variable("W2", | |
66 | + shape=[W1_SIZE, W2_SIZE], | |
67 | + dtype=tf.float32, | |
68 | + initializer=tf.random_normal_initializer(stddev=0.01)) | |
69 | + b2 = tf.get_variable("b2", | |
70 | + shape=[W2_SIZE], | |
71 | + dtype=tf.float32, | |
72 | + initializer=tf.random_normal_initializer(stddev=0.01)) | |
73 | + x3 = tf.nn.relu(tf.matmul(x2_drop, W2) + b2, name="x3") | |
74 | + | |
75 | + # 第3層のドロップアウトを設定 | |
76 | + x3_drop = tf.nn.dropout(x3, x3_keep_prob) | |
77 | + | |
78 | + # 第4層 | |
79 | + W3 = tf.get_variable("W3", | |
80 | + shape=[W2_SIZE, OUTPUT_SIZE], | |
81 | + dtype=tf.float32, | |
82 | + initializer=tf.random_normal_initializer(stddev=0.01)) | |
83 | + b3 = tf.get_variable("b3", | |
84 | + shape=[OUTPUT_SIZE], | |
85 | + dtype=tf.float32, | |
86 | + initializer=tf.random_normal_initializer(stddev=0.01)) | |
87 | + x4 = tf.nn.softmax(tf.matmul(x3_drop, W3) + b3, name="x4") | |
88 | + | |
89 | + # コスト関数 | |
90 | + cross_entropy = -tf.reduce_sum(y * tf.log(x4), name="cross_entropy") | |
91 | + tf.summary.scalar('cross_entropy', cross_entropy) | |
92 | + | |
93 | + # 正答率 | |
94 | + # 出力テンソルの中でもっとも値が大きいもののインデックスが | |
95 | + # 正答と等しいかどうかを計算する | |
96 | + correct = tf.equal(tf.argmax(x4,1), tf.argmax(y, 1), name="correct") | |
97 | + accuracy = tf.reduce_mean(tf.cast(correct, "float"), name="accuracy") | |
98 | + tf.summary.scalar('accuracy', accuracy) | |
99 | + | |
100 | + # 最適化アルゴリズムを定義 | |
101 | + global_step = tf.Variable(0, name='global_step', trainable=False) | |
102 | + optimizer = tf.train.AdamOptimizer(1e-5, name="optimizer") | |
103 | + minimize = optimizer.minimize(cross_entropy, global_step=global_step, name="minimize") | |
104 | + | |
105 | + # Prepare an object for saving the training results | |
106 | + saver = tf.train.Saver() | |
107 | + | |
108 | + | |
109 | +# Function for converting the loaded records | |
110 | +def map_dataset(serialized): | |
111 | + features = { | |
112 | + 'label': tf.FixedLenFeature([], tf.int64), | |
113 | + 'height': tf.FixedLenFeature([], tf.int64), | |
114 | + 'width': tf.FixedLenFeature([], tf.int64), | |
115 | + 'raw_image': tf.FixedLenFeature([INPUT_SIZE], tf.float32), | |
116 | + } | |
117 | + parsed = tf.parse_single_example(serialized, features) | |
118 | + | |
119 | + # Convert the loaded data | |
120 | + raw_label = tf.cast(parsed['label'], tf.int32) | |
121 | + label = tf.reshape(tf.slice(tf.eye(LABEL_SIZE), | |
122 | + [raw_label, 0], | |
123 | + [1, LABEL_SIZE]), | |
124 | + [LABEL_SIZE]) | |
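 | + # Note: slicing a row out of the identity matrix builds a one-hot | |
 | + # vector; an equivalent one-liner (a sketch, not used here) would be | |
 | + #   label = tf.one_hot(raw_label, LABEL_SIZE) | |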
125 | + | |
126 | + #image = tf.reshape(parsed['raw_image'], tf.stack([parsed['height'], parsed['width'], 3])) | |
127 | + image = parsed['raw_image'] | |
128 | + return (image, label, raw_label) | |
129 | + | |
130 | +## Load the dataset | |
131 | +# 200 records per class x 3 classes = 600 records in total | |
132 | +dataset = tf.data.TFRecordDataset(TEACH_FILES)\ | |
133 | + .map(map_dataset)\ | |
134 | + .batch(600) | |
135 | + | |
136 | +# Create an iterator for accessing the data | |
137 | +iterator = dataset.make_one_shot_iterator() | |
138 | +item = iterator.get_next() | |
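 | +# Note: a one-shot iterator cannot be re-initialized, so all 600 | |
 | +# records are fetched as a single batch below and reused every step. | |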
139 | + | |
140 | +# Create a session | |
141 | +sess = tf.Session() | |
142 | + | |
143 | +# Run variable initialization | |
144 | +sess.run(tf.global_variables_initializer()) | |
145 | + | |
146 | +# Check whether a file containing saved training results exists, | |
147 | +# and load it if it does | |
148 | +latest_filename = tf.train.latest_checkpoint("./") | |
149 | +if latest_filename: | |
150 | + print("load saved model {}".format(latest_filename)) | |
151 | + saver.restore(sess, latest_filename) | |
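 | +# Note: restoring a checkpoint also restores global_step, so the step | |
 | +# counts printed below continue from where the previous run stopped. | |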
152 | + | |
153 | +# Operations for collecting summaries | |
154 | +summary_op = tf.summary.merge_all() | |
155 | +summary_writer = tf.summary.FileWriter('data', graph=sess.graph) | |
156 | + | |
157 | +# Read out the training data | |
158 | +(dataset_x, dataset_y, values_y) = sess.run(item) | |
159 | + | |
160 | +steps = tf.train.global_step(sess, global_step) | |
161 | + | |
162 | +## Read out the test dataset | |
163 | +# The test data is 50 x 3 = 150 records | |
164 | +dataset2 = tf.data.TFRecordDataset(TEST_FILES)\ | |
165 | + .map(map_dataset)\ | |
166 | + .batch(150) | |
167 | +iterator2 = dataset2.make_one_shot_iterator() | |
168 | +item2 = iterator2.get_next() | |
169 | +(testdataset_x, testdataset_y, testvalues_y) = sess.run(item2) | |
170 | + | |
171 | +test_summary = tf.summary.scalar('test_result', accuracy) | |
172 | + | |
173 | +if steps == 0: | |
174 | + # Record the initial state | |
175 | + xe, acc, summary = sess.run([cross_entropy, accuracy, summary_op], | |
176 | + { x1: dataset_x, y: dataset_y }) | |
177 | + print("CROSS ENTROPY({}): {}".format(0, xe)) | |
178 | + print(" ACCURACY({}): {}".format(0, acc)) | |
179 | + summary_writer.add_summary(summary, global_step=0) | |
180 | + | |
181 | +# Start training | |
182 | +start_time = time.time() | |
183 | +for i in range(30): | |
184 | + for j in range(100): | |
185 | + sess.run(minimize, | |
186 | + {x1: dataset_x, | |
187 | + y: dataset_y, | |
188 | + enable_dropout: 1.0}) | |
189 | + | |
190 | + # Collect and save intermediate progress | |
191 | + xe, acc, summary = sess.run([cross_entropy, accuracy, summary_op], | |
192 | + {x1: dataset_x, y: dataset_y}) | |
193 | + acc2, summary2 = sess.run([accuracy, test_summary], | |
194 | + {x1: testdataset_x, y: testdataset_y}) | |
195 | + print("CROSS ENTROPY({}): {}".format(steps + 100 * (i+1), xe)) | |
196 | + print(" ACCURACY({}): {}".format(steps + 100 * (i+1), acc)) | |
197 | + print(" TEST RESULT({}): {}".format(steps + 100 * (i+1), acc2)) | |
198 | + summary_writer.add_summary(summary, global_step=tf.train.global_step(sess, global_step)) | |
199 | + summary_writer.add_summary(summary2, global_step=tf.train.global_step(sess, global_step)) | |
200 | + | |
201 | +# Training finished | |
202 | +print("time: {} sec".format(time.time() - start_time)) | |
203 | + | |
204 | +save_path = saver.save(sess, MODEL_NAME, global_step=tf.train.global_step(sess, global_step)) | |
205 | +print("Model saved to {}".format(save_path)) | |
206 | + | |
207 | +## Output the results | |
208 | + | |
209 | +# Compute the accuracy when the data used | |
210 | +# for training is given as input | |
211 | +print("----result with teaching data----") | |
212 | + | |
213 | +print("assumed label:") | |
214 | +print(sess.run(tf.argmax(x4, 1), {x1: dataset_x})) | |
215 | +print("real label:") | |
216 | +print(sess.run(tf.argmax(y, 1), {y: dataset_y})) | |
217 | +print("accuracy:", sess.run(accuracy, {x1: dataset_x, y: dataset_y})) | |
218 | + | |
219 | + | |
220 | +# Compute the accuracy when the test | |
221 | +# data is given as input | |
222 | +print("----result with test data----") | |
223 | + | |
224 | + | |
225 | +# Print the accuracy | |
226 | +print("assumed label:") | |
227 | +print(sess.run(tf.argmax(x4, 1), {x1: testdataset_x})) | |
228 | +print("real label:") | |
229 | +print(sess.run(tf.argmax(y, 1), {y: testdataset_y})) | |
230 | +print("accuracy:", sess.run(accuracy, {x1: testdataset_x, y: testdataset_y})) | |
231 | + |
@@ -0,0 +1,244 @@ | ||
1 | +#!/usr/bin/env python | |
2 | +# -*- coding: utf-8 -*- | |
3 | +import sys | |
4 | +import os.path | |
5 | +import tensorflow as tf | |
6 | +import time | |
7 | + | |
8 | +INPUT_WIDTH = 100 | |
9 | +INPUT_HEIGHT = 100 | |
10 | +INPUT_CHANNELS = 3 | |
11 | + | |
12 | +INPUT_SIZE = INPUT_WIDTH * INPUT_HEIGHT * INPUT_CHANNELS | |
13 | +W1_SIZE = 100 | |
14 | +W2_SIZE = 100 | |
15 | +OUTPUT_SIZE = 3 | |
16 | +LABEL_SIZE = OUTPUT_SIZE | |
17 | + | |
18 | +TEACH_FILES = ["../data2/teach_cat.tfrecord", | |
19 | + "../data2/teach_dog.tfrecord", | |
20 | + "../data2/teach_monkey.tfrecord"] | |
21 | +TEST_FILES = ["../data2/test_cat.tfrecord", | |
22 | + "../data2/test_dog.tfrecord", | |
23 | + "../data2/test_monkey.tfrecord"] | |
24 | + | |
25 | +MODEL_NAME = "./deep_model" | |
26 | + | |
27 | + | |
28 | +# Specify the random seed so that results are reproducible | |
29 | +tf.set_random_seed(1111) | |
30 | + | |
31 | +## Define the inputs and the computation graph | |
32 | +with tf.variable_scope('model') as scope: | |
33 | + x1 = tf.placeholder(dtype=tf.float32) | |
34 | + y = tf.placeholder(dtype=tf.float32) | |
35 | + | |
36 | + # Placeholder for configuring dropout | |
37 | + enable_dropout = tf.placeholder_with_default(0.0, [], name="enable_dropout") | |
38 | + | |
39 | + # Constant 1.0 used to compute the dropout keep probabilities | |
40 | + prob_one = tf.constant(1.0, dtype=tf.float32) | |
41 | + | |
42 | + # When enable_dropout is 0, the keep probability is 1; otherwise it is set to a fixed value | |
43 | + x1_keep_prob = prob_one - enable_dropout * 0.2 | |
44 | + x2_keep_prob = prob_one - enable_dropout * 0.5 | |
45 | + x3_keep_prob = prob_one - enable_dropout * 0.5 | |
46 | + | |
47 | + # Set up dropout for layer 1 | |
48 | + x1_drop = tf.nn.dropout(x1, x1_keep_prob) | |
49 | + | |
50 | + # Layer 2 | |
51 | + W1 = tf.get_variable("W1", | |
52 | + shape=[INPUT_SIZE, W1_SIZE], | |
53 | + dtype=tf.float32, | |
54 | + initializer=tf.random_normal_initializer(stddev=0.01)) | |
55 | + b1 = tf.get_variable("b1", | |
56 | + shape=[W1_SIZE], | |
57 | + dtype=tf.float32, | |
58 | + initializer=tf.random_normal_initializer(stddev=0.01)) | |
59 | + x2 = tf.nn.relu(tf.matmul(x1_drop, W1) + b1, name="x2") | |
60 | + | |
61 | + # Set up dropout for layer 2 | |
62 | + x2_drop = tf.nn.dropout(x2, x2_keep_prob) | |
63 | + | |
64 | + # Layer 3 | |
65 | + W2 = tf.get_variable("W2", | |
66 | + shape=[W1_SIZE, W2_SIZE], | |
67 | + dtype=tf.float32, | |
68 | + initializer=tf.random_normal_initializer(stddev=0.01)) | |
69 | + b2 = tf.get_variable("b2", | |
70 | + shape=[W2_SIZE], | |
71 | + dtype=tf.float32, | |
72 | + initializer=tf.random_normal_initializer(stddev=0.01)) | |
73 | + x3 = tf.nn.relu(tf.matmul(x2_drop, W2) + b2, name="x3") | |
74 | + | |
75 | + # Set up dropout for layer 3 | |
76 | + x3_drop = tf.nn.dropout(x3, x3_keep_prob) | |
77 | + | |
78 | + # Layer 4 | |
79 | + W3 = tf.get_variable("W3", | |
80 | + shape=[W2_SIZE, OUTPUT_SIZE], | |
81 | + dtype=tf.float32, | |
82 | + initializer=tf.random_normal_initializer(stddev=0.01)) | |
83 | + b3 = tf.get_variable("b3", | |
84 | + shape=[OUTPUT_SIZE], | |
85 | + dtype=tf.float32, | |
86 | + initializer=tf.random_normal_initializer(stddev=0.01)) | |
87 | + x4 = tf.nn.softmax(tf.matmul(x3_drop, W3) + b3, name="x4") | |
88 | + | |
89 | + # Cost function | |
90 | + cross_entropy = -tf.reduce_sum(y * tf.log(x4), name="cross_entropy") | |
91 | + tf.summary.scalar('cross_entropy', cross_entropy) | |
92 | + | |
93 | + # Accuracy | |
94 | + # Check whether the index of the largest value in the output | |
95 | + # tensor matches the correct answer | |
96 | + correct = tf.equal(tf.argmax(x4, 1), tf.argmax(y, 1), name="correct") | |
97 | + accuracy = tf.reduce_mean(tf.cast(correct, "float"), name="accuracy") | |
98 | + tf.summary.scalar('accuracy', accuracy) | |
99 | + | |
100 | + # Define the optimization algorithm | |
101 | + global_step = tf.Variable(0, name='global_step', trainable=False) | |
102 | + optimizer = tf.train.AdamOptimizer(1e-5, name="optimizer") | |
103 | + minimize = optimizer.minimize(cross_entropy, global_step=global_step, name="minimize") | |
104 | + | |
105 | + # Prepare an object for saving the training results | |
106 | + saver = tf.train.Saver() | |
107 | + | |
108 | + | |
109 | +# Function for converting the loaded records | |
110 | +def map_dataset(serialized): | |
111 | + features = { | |
112 | + 'label': tf.FixedLenFeature([], tf.int64), | |
113 | + 'height': tf.FixedLenFeature([], tf.int64), | |
114 | + 'width': tf.FixedLenFeature([], tf.int64), | |
115 | + 'raw_image': tf.FixedLenFeature([INPUT_SIZE], tf.float32), | |
116 | + } | |
117 | + parsed = tf.parse_single_example(serialized, features) | |
118 | + | |
119 | + # Convert the loaded data | |
120 | + raw_label = tf.cast(parsed['label'], tf.int32) | |
121 | + label = tf.reshape(tf.slice(tf.eye(LABEL_SIZE), | |
122 | + [raw_label, 0], | |
123 | + [1, LABEL_SIZE]), | |
124 | + [LABEL_SIZE]) | |
125 | + | |
126 | + #image = tf.reshape(parsed['raw_image'], tf.stack([parsed['height'], parsed['width'], 3])) | |
127 | + image = parsed['raw_image'] | |
128 | + return (image, label, raw_label) | |
129 | + | |
130 | +## Load the dataset | |
131 | +# 200 records per class x 3 classes = 600 records in total | |
132 | +# After reading the 600 records, shuffle them and read them out | |
133 | +# in batches of the size specified by dataset_size | |
134 | +dataset_size = tf.placeholder(shape=[], dtype=tf.int64) | |
135 | +dataset = tf.data.TFRecordDataset(TEACH_FILES)\ | |
136 | + .map(map_dataset)\ | |
137 | + .repeat()\ | |
138 | + .shuffle(600)\ | |
139 | + .batch(dataset_size) | |
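 | +# Note: with repeat() placed before shuffle(), the 600-record shuffle | |
 | +# buffer can mix records from adjacent epochs; shuffling before | |
 | +# repeat() would instead reshuffle each epoch separately. | |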
140 | + | |
141 | +# Create an iterator for accessing the data | |
142 | +# Because the data will be re-initialized repeatedly this time, | |
143 | +# create the iterator with make_initializable_iterator() | |
144 | +#iterator = dataset.make_one_shot_iterator() | |
145 | +iterator = dataset.make_initializable_iterator() | |
146 | +next_dataset = iterator.get_next() | |
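 | +# Note: running iterator.initializer restarts the pipeline, and a | |
 | +# different dataset_size can be fed each time, e.g.: | |
 | +#   sess.run(iterator.initializer, {dataset_size: 600})  # one full pass | |
 | +#   sess.run(iterator.initializer, {dataset_size: 100})  # mini-batches | |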
147 | + | |
148 | +# セッションの作成 | |
149 | +sess = tf.Session() | |
150 | + | |
151 | +# 変数の初期化を実行する | |
152 | +sess.run(tf.global_variables_initializer()) | |
153 | + | |
154 | +# Check whether a file containing saved training results exists, | |
155 | +# and load it if it does | |
156 | +latest_filename = tf.train.latest_checkpoint("./") | |
157 | +if latest_filename: | |
158 | + print("load saved model {}".format(latest_filename)) | |
159 | + saver.restore(sess, latest_filename) | |
160 | + | |
161 | +# Operations for collecting summaries | |
162 | +summary_op = tf.summary.merge_all() | |
163 | +summary_writer = tf.summary.FileWriter('data', graph=sess.graph) | |
164 | +steps = tf.train.global_step(sess, global_step) | |
165 | + | |
166 | +## Read out the test dataset | |
167 | +# The test data is 50 x 3 = 150 records | |
168 | +dataset2 = tf.data.TFRecordDataset(TEST_FILES)\ | |
169 | + .map(map_dataset)\ | |
170 | + .batch(150) | |
171 | +iterator2 = dataset2.make_one_shot_iterator() | |
172 | +item2 = iterator2.get_next() | |
173 | +(testdataset_x, testdataset_y, testvalues_y) = sess.run(item2) | |
174 | + | |
175 | +test_summary = tf.summary.scalar('test_result', accuracy) | |
176 | + | |
177 | +## Read out all the teacher data in advance in order to compute | |
178 | +# the accuracy over the entire training set | |
179 | +# Passing a parameter when running the initializer changes | |
180 | +# the size of the dataset that is fetched | |
181 | +sess.run(iterator.initializer, {dataset_size: 600}) | |
182 | +(dataset_all_x, dataset_all_y, values_all_y) = sess.run(next_dataset) | |
183 | + | |
184 | +if steps == 0: | |
185 | + # Record the initial state | |
186 | + xe, acc, summary = sess.run([cross_entropy, accuracy, summary_op], | |
187 | + { x1: dataset_all_x, y: dataset_all_y }) | |
188 | + print("CROSS ENTROPY({}): {}".format(0, xe)) | |
189 | + print(" ACCURACY({}): {}".format(0, acc)) | |
190 | + summary_writer.add_summary(summary, global_step=0) | |
191 | + | |
192 | +# Start training | |
193 | +start_time = time.time() | |
194 | +sess.run(iterator.initializer, {dataset_size: 100}) | |
195 | +for i in range(90): | |
196 | + for j in range(100): | |
197 | + (dataset_x, dataset_y, values_y) = sess.run(next_dataset) | |
198 | + sess.run(minimize, | |
199 | + {x1: dataset_x, | |
200 | + y: dataset_y, | |
201 | + enable_dropout: 1.0}) | |
202 | + | |
203 | + # Collect and save intermediate progress | |
204 | + xe, acc, summary = sess.run([cross_entropy, accuracy, summary_op], | |
205 | + {x1: dataset_all_x, y: dataset_all_y}) | |
206 | + acc2, summary2 = sess.run([accuracy, test_summary], | |
207 | + {x1: testdataset_x, y: testdataset_y}) | |
208 | + print("CROSS ENTROPY({}): {}".format(steps + 100 * (i+1), xe)) | |
209 | + print(" ACCURACY({}): {}".format(steps + 100 * (i+1), acc)) | |
210 | + print(" TEST RESULT({}): {}".format(steps + 100 * (i+1), acc2)) | |
211 | + summary_writer.add_summary(summary, global_step=tf.train.global_step(sess, global_step)) | |
212 | + summary_writer.add_summary(summary2, global_step=tf.train.global_step(sess, global_step)) | |
213 | + | |
214 | +# Training finished | |
215 | +print("time: {} sec".format(time.time() - start_time)) | |
216 | + | |
217 | +save_path = saver.save(sess, MODEL_NAME, global_step=tf.train.global_step(sess, global_step)) | |
218 | +print("Model saved to {}".format(save_path)) | |
219 | + | |
220 | +## Output the results | |
221 | + | |
222 | +# Compute the accuracy when the data used | |
223 | +# for training is given as input | |
224 | +print("----result with teaching data----") | |
225 | + | |
226 | +print("assumed label:") | |
227 | +print(sess.run(tf.argmax(x4, 1), {x1: dataset_all_x})) | |
228 | +print("real label:") | |
229 | +print(sess.run(tf.argmax(y, 1), {y: dataset_all_y})) | |
230 | +print("accuracy:", sess.run(accuracy, {x1: dataset_all_x, y: dataset_all_y})) | |
231 | + | |
232 | + | |
233 | +# テスト用データを入力した場合の | |
234 | +# 正答率を計算する | |
235 | +print("----result with test data----") | |
236 | + | |
237 | + | |
238 | +# Print the accuracy | |
239 | +print("assumed label:") | |
240 | +print(sess.run(tf.argmax(x4, 1), {x1: testdataset_x})) | |
241 | +print("real label:") | |
242 | +print(sess.run(tf.argmax(y, 1), {y: testdataset_y})) | |
243 | +print("accuracy:", sess.run(accuracy, {x1: testdataset_x, y: testdataset_y})) | |
244 | + |