Jupyter notebook ConvolutionNetwork2.ipynb
import tensorflow as tf
import numpy as np
import math
#%autoindent

try:
    from tqdm import tqdm
except ImportError:
    def tqdm(x, *args, **kwargs):
        return x
    
np.random.seed(0)

data = np.load('data_with_labels.npz')
# print('Files included in the archive:', data.files)

train = data['arr_0'] / 255.   # scale pixel intensities to [0, 1]
labels = data['arr_1']

# print('train[0]\n', train[0], train.shape[0])
# print('labels[0] = ', labels[0], len(labels))

import matplotlib.pyplot as plt
plt.ion()
%matplotlib inline

def to_onehot(labels, nclasses=5):
    outlabels = np.zeros((len(labels), nclasses))
    for i, l in enumerate(labels):
        outlabels[i,l] = 1
    return outlabels
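
# Illustrative check (not part of the original notebook): each row of the
# one-hot matrix should contain a single 1 in the column given by the label.
_demo = to_onehot(np.array([0, 2, 4]))
assert _demo.shape == (3, 5) and np.all(_demo.sum(axis=1) == 1)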

onehot = to_onehot(labels)

indices = np.random.permutation(train.shape[0])
valid_cnt = int(train.shape[0] * 0.1)  # hold out ~10% of the samples for validation


test_idx, training_idx = indices[:valid_cnt], indices[valid_cnt:]
test, train = train[test_idx, :], train[training_idx, :]
onehot_test, onehot_train = onehot[test_idx, :], onehot[training_idx, :]
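
# Sanity check (illustrative): images and one-hot labels must stay aligned
# after the shuffle/split, with valid_cnt samples held out for testing.
assert train.shape[0] == onehot_train.shape[0]
assert test.shape[0] == onehot_test.shape[0] == valid_cnt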

sess = tf.InteractiveSession()


x = tf.placeholder("float", [None, 36, 36])
x_im = tf.reshape(x, [-1, 36, 36, 1])
y_ = tf.placeholder("float", [None, 5])
num_filters = 4
winx = 5
winy = 5

W1 = tf.Variable(tf.truncated_normal([winx, winy, 1, num_filters],
                                     stddev=1. / math.sqrt(winx * winy)))
b1 = tf.Variable(tf.constant(0.1, shape=[num_filters]))

xw = tf.nn.conv2d(x_im, W1, strides=[1, 1, 1, 1], padding='SAME')
h1 = tf.nn.relu(xw + b1)

pl = tf.nn.max_pool(h1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
pl_size = np.product([s.value for s in pl.get_shape()[1:]])
plf = tf.reshape(pl, [-1, pl_size])
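
# For reference (illustrative): 'SAME' convolution keeps the 36x36 spatial
# size, the 2x2 stride-2 max-pool halves it to 18x18, so the flattened
# feature vector has 18 * 18 * num_filters = 1296 entries.
assert pl_size == 18 * 18 * num_filters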

num_hidden = 32
W2 = tf.Variable(tf.truncated_normal([pl_size, num_hidden], stddev=2./math.sqrt(pl_size)))
b2 = tf.Variable(tf.constant(0.2, shape=[num_hidden]))
h2 = tf.nn.relu(tf.matmul(plf,W2) + b2)

W3 = tf.Variable(tf.truncated_normal([num_hidden, 5], stddev=1.0/math.sqrt(num_hidden)))
b3 = tf.Variable(tf.constant(0.1, shape=[5]))

keep_prob = tf.placeholder("float")
h2_drop = tf.nn.dropout(h2, keep_prob)
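# keep_prob is fed as 0.5 while training (dropout on) and 1.0 when
# evaluating accuracy (dropout off); see the feed_dicts in the training loop.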
import time, sys

# update_progress(): displays or updates a console progress bar.
# Accepts a float between 0 and 1 (ints are converted to float).
# A value below 0 represents a halt; a value of 1 or more represents 100%.
def update_progress(progress, barLength=100):
    # barLength controls the width of the progress bar
    status = ""
    if isinstance(progress, int):
        progress = float(progress)
    if not isinstance(progress, float):
        progress = 0
        status = "error: progress var must be float\r\n"
    if progress < 0:
        progress = 0
        status = "Halt...\r\n"
    if progress >= 1:
        progress = 1.0
        status = "Done...\r\n"
    block = int(barLength * progress)
    text = "\rPercent: [{0}] {1:.1f}% {2}".format(
        "#" * block + " " * (barLength - block), progress * 100, status)
    sys.stdout.write(text)
    sys.stdout.flush()
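# Example usage (illustrative, not from the original notebook):
# update_progress(0.5, barLength=40)   # renders a 40-character bar at 50%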
sess.run(tf.global_variables_initializer())

logits = tf.matmul(h2_drop, W3) + b3
y = tf.nn.softmax(logits)

# softmax_cross_entropy_with_logits must be called with named arguments
# (labels=..., logits=...) and expects the raw logits, not the softmax output.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))  # True where the predicted class matches the label
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))   # fraction of correct predictions

epochs = 5000
train_acc = np.zeros(epochs//10)
test_acc = np.zeros(epochs//10)

saver = tf.train.Saver()


for i in tqdm(range(epochs)):
    if i % 10 == 0:
        A = accuracy.eval(feed_dict={x: train, y_: onehot_train, keep_prob: 1.0})
        train_acc[i//10] = A
        A = accuracy.eval(feed_dict={x:test, y_: onehot_test, keep_prob: 1.0})
        test_acc[i//10] = A
    if i%5 == 0:
        update_progress(1.*i/epochs, 100)
    train_step.run(feed_dict={x: train, y_:onehot_train, keep_prob: 0.5})
    if i % 500 == 0:
        saver.save(sess, './conv_tmp.ckpt')  # './' prefix, same as the final checkpoint below
print("")
print(train_acc[-1])
print(test_acc[-1])
plt.figure(figsize=(6,6))
plt.plot(train_acc, 'bo')
plt.plot(test_acc, 'rx')
plt.show()
pred = np.argmax(y.eval(feed_dict={x: test, keep_prob: 1.0, y_: onehot_test}), axis=1)
conf = np.zeros([5, 5])

for p,t in zip(pred, np.argmax(onehot_test, axis=1)):
    conf[t,p] += 1
plt.matshow(conf)
plt.colorbar()
plt.show()
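
# Illustrative follow-up: per-class recall from the confusion matrix
# (diagonal divided by row sums). Assumes every class occurs at least once
# in the test split; otherwise the corresponding entry is NaN.
with np.errstate(divide='ignore', invalid='ignore'):
    per_class_recall = np.diag(conf) / conf.sum(axis=1)
print("Per-class recall:", per_class_recall)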

f, plts = plt.subplots(4)
for i in range(4):
    plts[i].matshow(W1.eval()[:,:,0,i])
plt.show()
plt.matshow(W3.eval())
plt.colorbar()
plt.show()
saver = tf.train.Saver()
# use an explicit './' prefix: in recent TF 1.x releases a bare 'conv1.ckpt'
# raises "Parent directory of conv1.ckpt doesn't exist, can't save."
saver.save(sess, "./conv1.ckpt")

saver.restore(sess, "./conv1.ckpt")

# Or use numpy manually
def save_all(name='conv1.npz'):
    np.savez_compressed(name, W1.eval(), b1.eval(), W2.eval(), b2.eval(), W3.eval(), b3.eval())

save_all()

def load_all(name='conv1.npz'):
    data = np.load(name)
    sess.run(W1.assign(data['arr_0']))
    sess.run(b1.assign(data['arr_1']))
    sess.run(W2.assign(data['arr_2']))
    sess.run(b2.assign(data['arr_3']))
    sess.run(W3.assign(data['arr_4']))
    sess.run(b3.assign(data['arr_5']))
    
load_all()
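
# Illustrative round-trip check: after save_all()/load_all() the weights in
# the live session should match the values stored in the .npz archive.
_saved = np.load('conv1.npz')
assert np.allclose(W1.eval(), _saved['arr_0'])
assert np.allclose(b3.eval(), _saved['arr_5'])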

sess.close()
