GitHub: https://github.com/dennybritz/cnn-text-classification-tf
Paper: https://arxiv.org/pdf/1408.5882.pdf
Input: a sentence from a movie review, given as a string.
Eg. 1: "a thoughtful , provocative , insistently humanizing film . "
Eg. 2: "the effort is sincere and the results are honest , but the film is so bleak that it's hardly watchable ."
Output: a binary sentiment classification of the input sentence, where 0 indicates negative and 1 indicates positive.
Eg. 1: 1 (positive)
Eg. 2: 0 (negative)
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
/kaggle/input/textclassification/rt-polarity.pos
/kaggle/input/textclassification/rt-polarity.neg
f = open("/kaggle/input/textclassification/rt-polarity.neg", "r", encoding='utf-8')
a = f.readlines()
print(len(a))
for i in range(10):
    print(a[i])
5331
simplistic , silly and tedious .
it's so laddish and juvenile , only teenage boys could possibly find it funny .
exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .
[garbus] discards the potential for pathological study , exhuming instead , the skewed melodrama of the circumstantial situation .
a visually flashy but narratively opaque and emotionally vapid exercise in style and mystification .
the story is also as unoriginal as they come , already having been recycled more times than i'd care to count .
about the only thing to give the movie points for is bravado -- to take an entirely stale concept and push it through the audience's meat grinder one more time .
not so much farcical as sour .
unfortunately the story and the actors are served with a hack script .
all the more disquieting for its relatively gore-free allusions to the serial murders , but it falls down in its attempts to humanize its subject .
import numpy as np
import re

def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
    """
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    string = re.sub(r"\'s", " \'s", string)
    string = re.sub(r"\'ve", " \'ve", string)
    string = re.sub(r"n\'t", " n\'t", string)
    string = re.sub(r"\'re", " \'re", string)
    string = re.sub(r"\'d", " \'d", string)
    string = re.sub(r"\'ll", " \'ll", string)
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    string = re.sub(r"\(", " \( ", string)
    string = re.sub(r"\)", " \) ", string)
    string = re.sub(r"\?", " \? ", string)
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip().lower()
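A quick sanity check of the tokenizer (a minimal sketch on a made-up fragment; the expected output in the comment follows from tracing the rules above):

print(clean_str("It's so laddish, only teenage boys could find it funny!"))
# -> "it 's so laddish , only teenage boys could find it funny !"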
def load_data_and_labels(positive_data_file, negative_data_file):
    """
    Loads MR polarity data from files, splits the data into words and generates labels.
    Returns split sentences and labels.
    """
    # Load data from files
    positive_examples = list(open(positive_data_file, "r", encoding='utf-8').readlines())
    positive_examples = [s.strip() for s in positive_examples]
    negative_examples = list(open(negative_data_file, "r", encoding='utf-8').readlines())
    negative_examples = [s.strip() for s in negative_examples]
    # Split by words
    x_text = positive_examples + negative_examples
    x_text = [clean_str(sent) for sent in x_text]
    # Generate labels
    positive_labels = [[0, 1] for _ in positive_examples]
    negative_labels = [[1, 0] for _ in negative_examples]
    y = np.concatenate([positive_labels, negative_labels], 0)
    return [x_text, y]
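A quick check of the label convention (one-hot rows laid out as [negative, positive]; the file paths are the ones discovered above):

x_text, y = load_data_and_labels(
    "/kaggle/input/textclassification/rt-polarity.pos",
    "/kaggle/input/textclassification/rt-polarity.neg")
print(len(x_text))   # 10662 = 5331 positive + 5331 negative sentences
print(y[0], y[-1])   # [0 1] for a positive example, [1 0] for a negative one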
def batch_iter(data, batch_size, num_epochs, shuffle=True):
    """
    Generates a batch iterator for a dataset.
    """
    data = np.array(data)
    data_size = len(data)
    num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1
    for epoch in range(num_epochs):
        # Shuffle the data at each epoch
        if shuffle:
            shuffle_indices = np.random.permutation(np.arange(data_size))
            shuffled_data = data[shuffle_indices]
        else:
            shuffled_data = data
        for batch_num in range(num_batches_per_epoch):
            start_index = batch_num * batch_size
            end_index = min((batch_num + 1) * batch_size, data_size)
            yield shuffled_data[start_index:end_index]
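A toy run of the iterator (a minimal sketch: with 5 items and batch_size 2, each epoch yields batches of sizes 2, 2, 1, the last batch being smaller):

toy = [1, 2, 3, 4, 5]
for batch in batch_iter(toy, batch_size=2, num_epochs=1, shuffle=False):
    print(batch)
# [1 2]
# [3 4]
# [5]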
You may use Word2vec or GloVe vectors to replace the random embedding initialization; a minimal sketch follows the hyperparameter list below.
Tunable hyperparameters:
embedding_dim
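A minimal sketch of that replacement, assuming a plain-text GloVe file (the file name and its dimensionality are hypothetical; cnn, sess, vocab_processor, and FLAGS come from the training blocks below, and FLAGS.embedding_dim must match the file's vector size). It would run right after sess.run(tf.global_variables_initializer()):

# Sketch: overwrite the randomly initialized embedding table cnn.W with
# pretrained vectors. Each GloVe line is "<word> <v1> ... <vN>".
init_W = np.random.uniform(
    -1.0, 1.0,
    (len(vocab_processor.vocabulary_), FLAGS.embedding_dim)).astype(np.float32)
with open("glove.6B.100d.txt", "r", encoding="utf-8") as f:  # hypothetical path
    for line in f:
        parts = line.rstrip().split(" ")
        word, vec = parts[0], np.asarray(parts[1:], dtype=np.float32)
        idx = vocab_processor.vocabulary_.get(word)
        if idx != 0:  # index 0 is the unknown-word bucket; keep its random vector
            init_W[idx] = vec
sess.run(cnn.W.assign(init_W))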
You may try a nonlinearity other than ReLU.
Tunable hyperparameters:
filter_sizes
num_filters
Softmax function: $\mathrm{softmax}(z)_i = \frac{e^{z_i}}{\sum_j e^{z_j}}$
Cross Entropy Loss: $L = -\sum_i y_i \log(\mathrm{softmax}(z)_i)$
Tunable hyperparameters:
dropout_keep_prob (only for training)
l2_reg_lambda
All the tunable hyperparameters can be set in the third code block (train).
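A quick numeric check of the two formulas (a minimal sketch with made-up scores; not part of the model code):

# Toy unnormalized scores for one example and its one-hot label [negative, positive].
z = np.array([1.0, 2.0])
y_true = np.array([0.0, 1.0])
softmax = np.exp(z) / np.sum(np.exp(z))        # -> [0.269, 0.731]
cross_entropy = -np.sum(y_true * np.log(softmax))  # -> -log(0.731) ~= 0.313
print(softmax, cross_entropy)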
import tensorflow as tf
import numpy as np

class TextCNN(object):
    """
    A CNN for text classification.
    Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.
    """
    def __init__(
            self, sequence_length, num_classes, vocab_size,
            embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):

        # Placeholders for input, output and dropout
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0)

        # Embedding layer
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            self.W = tf.Variable(
                tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                name="W")
            self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)

        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                # Convolution Layer
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Maxpooling over the outputs
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                pooled_outputs.append(pooled)

        # Combine all the pooled features
        num_filters_total = num_filters * len(filter_sizes)
        self.h_pool = tf.concat(pooled_outputs, 3)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])

        # Add dropout
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)

        # Final (unnormalized) scores and predictions
        with tf.name_scope("output"):
            W = tf.get_variable(
                "W",
                shape=[num_filters_total, num_classes],
                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
            self.predictions = tf.argmax(self.scores, 1, name="predictions")

        # Calculate mean cross-entropy loss
        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss

        # Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
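To make the tensor shapes concrete, here is a hedged walkthrough plus a toy instantiation (the numbers below are illustrative placeholders, not values computed by this notebook):

# Shape walkthrough for one filter size, with batch size B, sequence length L,
# embedding_size E and num_filters F:
#   embedded_chars_expanded : [B, L, E, 1]
#   conv ("VALID" padding)  : [B, L - filter_size + 1, 1, F]
#   pooled (max over time)  : [B, 1, 1, F]
# Concatenating all filter sizes and flattening gives
#   h_pool_flat             : [B, F * len(filter_sizes)]
with tf.Graph().as_default():
    cnn = TextCNN(sequence_length=56, num_classes=2, vocab_size=20000,
                  embedding_size=128, filter_sizes=[3, 4, 5], num_filters=128)
    print(cnn.h_pool_flat.shape)  # (?, 384) == [B, 128 * 3]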
Some tunable training hyperparameters which may influence the results:
batch_size
num_epochs
optimizer (the tf.train.AdamOptimizer line in the training procedure)
All the tunable hyperparameters can be tuned here; a sketch of swapping the optimizer follows below.
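A minimal sketch of that swap inside train() (RMSProp is just one alternative; the 1e-3 learning rate is kept from the original Adam line):

# Replace the optimizer line in train() below, e.g. RMSProp instead of Adam:
optimizer = tf.train.RMSPropOptimizer(1e-3)
grads_and_vars = optimizer.compute_gradients(cnn.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)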
#! /usr/bin/env python
import tensorflow as tf
import numpy as np
import os
import time
import datetime
from tensorflow.contrib import learn
# Parameters
# ==================================================
# Data loading params
tf.flags.DEFINE_float("dev_sample_percentage", .1, "Percentage of the training data to use for validation")
tf.flags.DEFINE_string("positive_data_file", "/kaggle/input/textclassification/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "/kaggle/input/textclassification/rt-polarity.neg", "Data source for the negative data.")
# Model Hyperparameters
tf.flags.DEFINE_integer("embedding_dim", 128, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda (default: 0.0)")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 15, "Number of training epochs (default: 20)")
tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)")
tf.flags.DEFINE_integer("num_checkpoints", 5, "Number of checkpoints to store (default: 5)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
# FLAGS._parse_flags()
# print("\nParameters:")
# for attr, value in sorted(FLAGS.__flags.items()):
# print("{}={}".format(attr.upper(), value))
# print("")
timestamp = str(int(time.time()))
def preprocess():
    # Data Preparation
    # ==================================================

    # Load data
    print("Loading data...")
    x_text, y = load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)

    # Build vocabulary
    max_document_length = max([len(x.split(" ")) for x in x_text])
    vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
    x = np.array(list(vocab_processor.fit_transform(x_text)))

    # Randomly shuffle data
    np.random.seed(10)
    shuffle_indices = np.random.permutation(np.arange(len(y)))
    x_shuffled = x[shuffle_indices]
    y_shuffled = y[shuffle_indices]

    # Split train/test set
    # TODO: This is very crude, should use cross-validation
    dev_sample_index = -1 * int(FLAGS.dev_sample_percentage * float(len(y)))
    x_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]
    y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]

    del x, y, x_shuffled, y_shuffled

    print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
    print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))
    return x_train, y_train, vocab_processor, x_dev, y_dev
def train(x_train, y_train, vocab_processor, x_dev, y_dev):
    # Training
    # ==================================================
    with tf.Graph().as_default():
        session_conf = tf.ConfigProto(
            allow_soft_placement=FLAGS.allow_soft_placement,
            log_device_placement=FLAGS.log_device_placement)
        sess = tf.Session(config=session_conf)
        with sess.as_default():
            cnn = TextCNN(
                sequence_length=x_train.shape[1],
                num_classes=y_train.shape[1],
                vocab_size=len(vocab_processor.vocabulary_),
                embedding_size=FLAGS.embedding_dim,
                filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
                num_filters=FLAGS.num_filters,
                l2_reg_lambda=FLAGS.l2_reg_lambda)

            # Define Training procedure
            global_step = tf.Variable(0, name="global_step", trainable=False)
            optimizer = tf.train.AdamOptimizer(1e-3)
            grads_and_vars = optimizer.compute_gradients(cnn.loss)
            train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)

            # Keep track of gradient values and sparsity (optional)
            grad_summaries = []
            for g, v in grads_and_vars:
                if g is not None:
                    grad_hist_summary = tf.summary.histogram("{}/grad/hist".format(v.name), g)
                    sparsity_summary = tf.summary.scalar("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
                    grad_summaries.append(grad_hist_summary)
                    grad_summaries.append(sparsity_summary)
            grad_summaries_merged = tf.summary.merge(grad_summaries)

            # Output directory for models and summaries
            out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
            print("Writing to {}\n".format(out_dir))

            # Summaries for loss and accuracy
            loss_summary = tf.summary.scalar("loss", cnn.loss)
            acc_summary = tf.summary.scalar("accuracy", cnn.accuracy)

            # Train Summaries
            train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged])
            train_summary_dir = os.path.join(out_dir, "summaries", "train")
            train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)

            # Dev summaries
            dev_summary_op = tf.summary.merge([loss_summary, acc_summary])
            dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
            dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)

            # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
            checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
            checkpoint_prefix = os.path.join(checkpoint_dir, "model")
            if not os.path.exists(checkpoint_dir):
                os.makedirs(checkpoint_dir)
            saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)

            # Write vocabulary
            vocab_processor.save(os.path.join(out_dir, "vocab"))

            # Initialize all variables
            sess.run(tf.global_variables_initializer())

            def train_step(x_batch, y_batch):
                """
                A single training step
                """
                feed_dict = {
                    cnn.input_x: x_batch,
                    cnn.input_y: y_batch,
                    cnn.dropout_keep_prob: FLAGS.dropout_keep_prob
                }
                _, step, summaries, loss, accuracy = sess.run(
                    [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],
                    feed_dict)
                time_str = datetime.datetime.now().isoformat()
                print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
                train_summary_writer.add_summary(summaries, step)

            def dev_step(x_batch, y_batch, writer=None):
                """
                Evaluates model on a dev set
                """
                feed_dict = {
                    cnn.input_x: x_batch,
                    cnn.input_y: y_batch,
                    cnn.dropout_keep_prob: 1.0
                }
                step, summaries, loss, accuracy = sess.run(
                    [global_step, dev_summary_op, cnn.loss, cnn.accuracy],
                    feed_dict)
                time_str = datetime.datetime.now().isoformat()
                print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
                if writer:
                    writer.add_summary(summaries, step)

            # Generate batches
            batches = batch_iter(
                list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
            # Training loop. For each batch...
            for batch in batches:
                x_batch, y_batch = zip(*batch)
                train_step(x_batch, y_batch)
                current_step = tf.train.global_step(sess, global_step)
                if current_step % FLAGS.evaluate_every == 0:
                    print("\nEvaluation:")
                    dev_step(x_dev, y_dev, writer=dev_summary_writer)
                    print("")
                if current_step % FLAGS.checkpoint_every == 0:
                    path = saver.save(sess, checkpoint_prefix, global_step=current_step)
                    print("Saved model checkpoint to {}\n".format(path))
def main(argv=None):
    x_train, y_train, vocab_processor, x_dev, y_dev = preprocess()
    train(x_train, y_train, vocab_processor, x_dev, y_dev)

if __name__ == '__main__':
    tf.app.run()
Loading data...
Vocabulary Size: 18758
Train/Dev split: 9596/1066
Writing to /kaggle/working/runs/1569430829

2019-09-25T17:00:32.386895: step 1, loss 1.77248, acc 0.5625
2019-09-25T17:00:32.519622: step 2, loss 1.49974, acc 0.546875
2019-09-25T17:00:32.644681: step 3, loss 2.6099, acc 0.421875
...
2019-09-25T17:00:44.964689: step 100, loss 1.14148, acc 0.515625

Evaluation:
2019-09-25T17:00:45.297944: step 100, loss 0.839263, acc 0.570356
Saved model checkpoint to /kaggle/working/runs/1569430829/checkpoints/model-100

...
Evaluation:
2019-09-25T17:00:58.079871: step 200, loss 0.648391, acc 0.630394
Saved model checkpoint to /kaggle/working/runs/1569430829/checkpoints/model-200

...
Evaluation:
2019-09-25T17:01:10.824446: step 300, loss 0.626363, acc 0.648218
Saved model checkpoint to /kaggle/working/runs/1569430829/checkpoints/model-300

...
Evaluation:
2019-09-25T17:01:23.597352: step 400, loss 0.643535, acc 0.616323
Saved model checkpoint to /kaggle/working/runs/1569430829/checkpoints/model-400

...
Evaluation:
2019-09-25T17:01:36.324221: step 500, loss 0.614785, acc 0.657598
Saved model checkpoint to /kaggle/working/runs/1569430829/checkpoints/model-500

...
2019-09-25T17:01:49.221724: step 600, loss 0.541669, acc 0.783333

Evaluation:
2019-09-25T17:01:49.435614: step 600, loss 0.654792, acc 0.589118
Saved model checkpoint to /kaggle/working/runs/1569430829/checkpoints/model-600
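Each per-step line above is printed by the training loop itself. For orientation, here is a minimal sketch of the kind of train_step function that emits them, modeled on train.py from the linked repo; sess, cnn, train_op, global_step, and FLAGS are assumed to be defined by the surrounding training script.

import datetime

def train_step(x_batch, y_batch):
    # One gradient update; assumes sess, cnn, train_op, global_step, FLAGS exist.
    feed_dict = {
        cnn.input_x: x_batch,
        cnn.input_y: y_batch,
        cnn.dropout_keep_prob: FLAGS.dropout_keep_prob,  # dropout active during training
    }
    _, step, loss, accuracy = sess.run(
        [train_op, global_step, cnn.loss, cnn.accuracy], feed_dict)
    time_str = datetime.datetime.now().isoformat()
    # Produces the "<timestamp>: step N, loss L, acc A" lines seen in this output.
    print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))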
[... per-step training log elided ...]
2019-09-25T17:02:01.900320: step 700, loss 0.526717, acc 0.703125

Evaluation:
2019-09-25T17:02:02.109398: step 700, loss 0.591631, acc 0.682927
Saved model checkpoint to /kaggle/working/runs/1569430829/checkpoints/model-700
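The dev-set lines that follow each "Evaluation:" header come from a matching dev_step. The key differences from train_step are that no gradient update runs and dropout is disabled, and the metrics are computed on the full held-out split rather than a 64-example batch. A sketch under the same assumptions:

def dev_step(x_batch, y_batch):
    # Evaluation pass only: no train_op, and dropout keep probability fixed at 1.0.
    feed_dict = {
        cnn.input_x: x_batch,
        cnn.input_y: y_batch,
        cnn.dropout_keep_prob: 1.0,  # disable dropout when measuring dev accuracy
    }
    step, loss, accuracy = sess.run(
        [global_step, cnn.loss, cnn.accuracy], feed_dict)
    time_str = datetime.datetime.now().isoformat()
    print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))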
[... per-step training log elided ...]
2019-09-25T17:02:14.828599: step 800, loss 0.325491, acc 0.875

Evaluation:
2019-09-25T17:02:15.042600: step 800, loss 0.586454, acc 0.690432
Saved model checkpoint to /kaggle/working/runs/1569430829/checkpoints/model-800
[... per-step training log elided ...]
2019-09-25T17:02:27.549515: step 900, loss 0.423913, acc 0.733333

Evaluation:
2019-09-25T17:02:27.751087: step 900, loss 0.597228, acc 0.681989
Saved model checkpoint to /kaggle/working/runs/1569430829/checkpoints/model-900
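Evaluations and checkpoints recur every 100 steps, which matches the evaluate_every and checkpoint_every defaults in train.py. A sketch of the outer loop that interleaves them (batch_iter, saver, and checkpoint_prefix are assumed from the training script). Note also that training accuracies such as 0.783333 (step 600) and 0.733333 (step 900) are reported over the short final batch of an epoch: under the default 10% dev split, the 9,596 training examples form 149 full batches of 64 plus one batch of 60, so every 150th step is a 60-example batch.

import tensorflow as tf

batches = batch_iter(list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
for batch in batches:
    x_batch, y_batch = zip(*batch)
    train_step(x_batch, y_batch)
    current_step = tf.train.global_step(sess, global_step)
    if current_step % FLAGS.evaluate_every == 0:    # default 100: evals at 600, 700, ...
        print("\nEvaluation:")
        dev_step(x_dev, y_dev)  # dev metrics logged in the same format
    if current_step % FLAGS.checkpoint_every == 0:  # default 100
        path = saver.save(sess, checkpoint_prefix, global_step=current_step)
        print("Saved model checkpoint to {}\n".format(path))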
[... per-step training log elided ...]
2019-09-25T17:02:40.138921: step 1000, loss 0.384842, acc 0.84375

Evaluation:
2019-09-25T17:02:40.358341: step 1000, loss 0.573003, acc 0.712008
Saved model checkpoint to /kaggle/working/runs/1569430829/checkpoints/model-1000
[... per-step training log elided ...]
2019-09-25T17:02:52.771514: step 1100, loss 0.303779, acc 0.828125

Evaluation:
2019-09-25T17:02:52.987961: step 1100, loss 0.572715, acc 0.725141
Saved model checkpoint to /kaggle/working/runs/1569430829/checkpoints/model-1100
[... per-step training log elided ...]
2019-09-25T17:03:05.478609: step 1200, loss 0.308703, acc 0.9

Evaluation:
2019-09-25T17:03:05.691749: step 1200, loss 0.593496, acc 0.727955
Saved model checkpoint to /kaggle/working/runs/1569430829/checkpoints/model-1200
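Through step 1200 the gap is widening: training-batch accuracy is now frequently above 0.9 while dev accuracy has plateaued around 0.72, a classic overfitting signature, so the checkpoint worth keeping is the one with the best dev score. A small, purely illustrative helper for pulling those numbers out of the printed log (log_text and the regex are assumptions for illustration, not part of the notebook):

import re

def dev_accuracies(log_text):
    # Dev results are the "step N, loss L, acc A" lines that follow "Evaluation:".
    pattern = re.compile(r"Evaluation:\s*\S+: step (\d+), loss [\d.]+, acc ([\d.]+)")
    return [(int(step), float(acc)) for step, acc in pattern.findall(log_text)]

# Usage sketch: over the evaluations shown so far, step 1200 scores best (acc 0.727955).
# best_step, best_acc = max(dev_accuracies(log_text), key=lambda p: p[1])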
[... per-step training log elided ...]
2019-09-25T17:03:18.145239: step 1300, loss 0.200522, acc 0.90625

Evaluation:
2019-09-25T17:03:18.347962: step 1300, loss 0.611813, acc 0.72045
Saved model checkpoint to /kaggle/working/runs/1569430829/checkpoints/model-1300
[... per-step training log elided ...]
2019-09-25T17:03:30.776288: step 1400, loss 0.309654, acc 0.875

Evaluation:
2019-09-25T17:03:30.977936: step 1400, loss 0.624332, acc 0.724203
Saved model checkpoint to /kaggle/working/runs/1569430829/checkpoints/model-1400
[... per-step training log elided ...]
2019-09-25T17:03:43.310763: step 1499, loss 0.243616,
acc 0.890625 2019-09-25T17:03:43.428730: step 1500, loss 0.14753, acc 0.95 Evaluation: 2019-09-25T17:03:43.627179: step 1500, loss 0.626894, acc 0.736398 Saved model checkpoint to /kaggle/working/runs/1569430829/checkpoints/model-1500 2019-09-25T17:03:43.882591: step 1501, loss 0.142598, acc 0.96875 2019-09-25T17:03:44.004449: step 1502, loss 0.239055, acc 0.90625 2019-09-25T17:03:44.140872: step 1503, loss 0.195078, acc 0.890625 2019-09-25T17:03:44.263733: step 1504, loss 0.135864, acc 0.96875 2019-09-25T17:03:44.386049: step 1505, loss 0.0981373, acc 0.96875 2019-09-25T17:03:44.506078: step 1506, loss 0.152867, acc 0.9375 2019-09-25T17:03:44.627704: step 1507, loss 0.217913, acc 0.921875 2019-09-25T17:03:44.748189: step 1508, loss 0.242111, acc 0.921875 2019-09-25T17:03:44.870854: step 1509, loss 0.11891, acc 0.984375 2019-09-25T17:03:44.992067: step 1510, loss 0.0873899, acc 0.984375 2019-09-25T17:03:45.114891: step 1511, loss 0.0900953, acc 0.984375 2019-09-25T17:03:45.240130: step 1512, loss 0.151652, acc 0.90625 2019-09-25T17:03:45.362532: step 1513, loss 0.143872, acc 0.921875 2019-09-25T17:03:45.488425: step 1514, loss 0.170984, acc 0.9375 2019-09-25T17:03:45.610905: step 1515, loss 0.0814279, acc 1 2019-09-25T17:03:45.731825: step 1516, loss 0.121959, acc 0.96875 2019-09-25T17:03:45.854671: step 1517, loss 0.0943016, acc 0.96875 2019-09-25T17:03:45.977276: step 1518, loss 0.0872339, acc 0.953125 2019-09-25T17:03:46.100810: step 1519, loss 0.121539, acc 0.953125 2019-09-25T17:03:46.237285: step 1520, loss 0.198966, acc 0.90625 2019-09-25T17:03:46.361227: step 1521, loss 0.0721747, acc 0.96875 2019-09-25T17:03:46.482888: step 1522, loss 0.19376, acc 0.890625 2019-09-25T17:03:46.604726: step 1523, loss 0.14623, acc 0.921875 2019-09-25T17:03:46.728111: step 1524, loss 0.183175, acc 0.90625 2019-09-25T17:03:46.850885: step 1525, loss 0.12276, acc 0.953125 2019-09-25T17:03:46.973344: step 1526, loss 0.059209, acc 0.984375 2019-09-25T17:03:47.093344: step 1527, loss 0.132837, acc 0.96875 2019-09-25T17:03:47.221745: step 1528, loss 0.147279, acc 0.9375 2019-09-25T17:03:47.344117: step 1529, loss 0.274219, acc 0.921875 2019-09-25T17:03:47.465265: step 1530, loss 0.135477, acc 0.953125 2019-09-25T17:03:47.584265: step 1531, loss 0.0988719, acc 0.984375 2019-09-25T17:03:47.704341: step 1532, loss 0.161626, acc 0.9375 2019-09-25T17:03:47.827887: step 1533, loss 0.144266, acc 0.953125 2019-09-25T17:03:47.947087: step 1534, loss 0.09813, acc 0.96875 2019-09-25T17:03:48.094418: step 1535, loss 0.120536, acc 0.96875 2019-09-25T17:03:48.218869: step 1536, loss 0.158411, acc 0.9375 2019-09-25T17:03:48.351240: step 1537, loss 0.143368, acc 0.921875 2019-09-25T17:03:48.471681: step 1538, loss 0.156556, acc 0.96875 2019-09-25T17:03:48.592873: step 1539, loss 0.103281, acc 0.96875 2019-09-25T17:03:48.712666: step 1540, loss 0.132821, acc 0.9375 2019-09-25T17:03:48.837406: step 1541, loss 0.169636, acc 0.921875 2019-09-25T17:03:48.959316: step 1542, loss 0.218933, acc 0.890625 2019-09-25T17:03:49.080556: step 1543, loss 0.133249, acc 0.953125 2019-09-25T17:03:49.201119: step 1544, loss 0.0914324, acc 0.953125 2019-09-25T17:03:49.335042: step 1545, loss 0.119181, acc 0.953125 2019-09-25T17:03:49.461476: step 1546, loss 0.220632, acc 0.90625 2019-09-25T17:03:49.587759: step 1547, loss 0.14317, acc 0.953125 2019-09-25T17:03:49.707491: step 1548, loss 0.19542, acc 0.921875 2019-09-25T17:03:49.828161: step 1549, loss 0.174585, acc 0.9375 2019-09-25T17:03:49.950848: step 1550, loss 0.136545, acc 0.921875 
2019-09-25T17:03:50.079818: step 1551, loss 0.0893843, acc 0.96875 2019-09-25T17:03:50.202105: step 1552, loss 0.129988, acc 0.9375 2019-09-25T17:03:50.328719: step 1553, loss 0.203939, acc 0.921875 2019-09-25T17:03:50.450768: step 1554, loss 0.190358, acc 0.890625 2019-09-25T17:03:50.574023: step 1555, loss 0.103878, acc 0.953125 2019-09-25T17:03:50.693838: step 1556, loss 0.18269, acc 0.90625 2019-09-25T17:03:50.817552: step 1557, loss 0.1245, acc 0.96875 2019-09-25T17:03:50.939300: step 1558, loss 0.0861716, acc 0.984375 2019-09-25T17:03:51.060739: step 1559, loss 0.0788143, acc 0.984375 2019-09-25T17:03:51.182531: step 1560, loss 0.193825, acc 0.90625 2019-09-25T17:03:51.305533: step 1561, loss 0.171187, acc 0.9375 2019-09-25T17:03:51.437217: step 1562, loss 0.0587249, acc 0.984375 2019-09-25T17:03:51.559811: step 1563, loss 0.201649, acc 0.921875 2019-09-25T17:03:51.679122: step 1564, loss 0.100982, acc 0.953125 2019-09-25T17:03:51.798247: step 1565, loss 0.122065, acc 0.96875 2019-09-25T17:03:51.917409: step 1566, loss 0.0553483, acc 0.984375 2019-09-25T17:03:52.040120: step 1567, loss 0.128409, acc 0.953125 2019-09-25T17:03:52.162270: step 1568, loss 0.100523, acc 0.96875 2019-09-25T17:03:52.283521: step 1569, loss 0.138298, acc 0.9375 2019-09-25T17:03:52.410009: step 1570, loss 0.172901, acc 0.9375 2019-09-25T17:03:52.530744: step 1571, loss 0.172627, acc 0.921875 2019-09-25T17:03:52.649049: step 1572, loss 0.124628, acc 0.9375 2019-09-25T17:03:52.767726: step 1573, loss 0.10968, acc 0.984375 2019-09-25T17:03:52.891964: step 1574, loss 0.140319, acc 0.96875 2019-09-25T17:03:53.012652: step 1575, loss 0.114215, acc 0.96875 2019-09-25T17:03:53.133461: step 1576, loss 0.229698, acc 0.90625 2019-09-25T17:03:53.253384: step 1577, loss 0.129467, acc 0.96875 2019-09-25T17:03:53.374658: step 1578, loss 0.225814, acc 0.90625 2019-09-25T17:03:53.508875: step 1579, loss 0.176336, acc 0.9375 2019-09-25T17:03:53.630172: step 1580, loss 0.185816, acc 0.890625 2019-09-25T17:03:53.749329: step 1581, loss 0.128777, acc 0.953125 2019-09-25T17:03:53.874260: step 1582, loss 0.0878195, acc 0.984375 2019-09-25T17:03:53.995505: step 1583, loss 0.331765, acc 0.875 2019-09-25T17:03:54.115258: step 1584, loss 0.19366, acc 0.890625 2019-09-25T17:03:54.234492: step 1585, loss 0.165528, acc 0.921875 2019-09-25T17:03:54.355945: step 1586, loss 0.211936, acc 0.921875 2019-09-25T17:03:54.481937: step 1587, loss 0.11945, acc 0.9375 2019-09-25T17:03:54.605005: step 1588, loss 0.129129, acc 0.9375 2019-09-25T17:03:54.724963: step 1589, loss 0.155841, acc 0.9375 2019-09-25T17:03:54.847583: step 1590, loss 0.092787, acc 0.96875 2019-09-25T17:03:54.969559: step 1591, loss 0.124454, acc 0.953125 2019-09-25T17:03:55.088869: step 1592, loss 0.115442, acc 0.96875 2019-09-25T17:03:55.210148: step 1593, loss 0.145057, acc 0.953125 2019-09-25T17:03:55.330626: step 1594, loss 0.100482, acc 0.953125 2019-09-25T17:03:55.450368: step 1595, loss 0.183015, acc 0.90625 2019-09-25T17:03:55.582273: step 1596, loss 0.0726837, acc 0.984375 2019-09-25T17:03:55.704272: step 1597, loss 0.125614, acc 0.953125 2019-09-25T17:03:55.824784: step 1598, loss 0.133947, acc 0.90625 2019-09-25T17:03:55.948117: step 1599, loss 0.159896, acc 0.921875 2019-09-25T17:03:56.068346: step 1600, loss 0.163219, acc 0.921875 Evaluation: 2019-09-25T17:03:56.270618: step 1600, loss 0.658401, acc 0.739212 Saved model checkpoint to /kaggle/working/runs/1569430829/checkpoints/model-1600 2019-09-25T17:03:56.534376: step 1601, loss 0.104593, acc 0.96875 
2019-09-25T17:03:56.660284: step 1602, loss 0.209635, acc 0.9375 2019-09-25T17:03:56.780033: step 1603, loss 0.198047, acc 0.90625 2019-09-25T17:03:56.901116: step 1604, loss 0.169318, acc 0.953125 2019-09-25T17:03:57.022997: step 1605, loss 0.16422, acc 0.921875 2019-09-25T17:03:57.141730: step 1606, loss 0.122469, acc 0.9375 2019-09-25T17:03:57.263091: step 1607, loss 0.124679, acc 0.96875 2019-09-25T17:03:57.384450: step 1608, loss 0.149795, acc 0.90625 2019-09-25T17:03:57.504440: step 1609, loss 0.0686633, acc 0.984375 2019-09-25T17:03:57.637270: step 1610, loss 0.101584, acc 0.96875 2019-09-25T17:03:57.756825: step 1611, loss 0.226796, acc 0.890625 2019-09-25T17:03:57.878345: step 1612, loss 0.214623, acc 0.921875 2019-09-25T17:03:57.998258: step 1613, loss 0.203124, acc 0.890625 2019-09-25T17:03:58.117284: step 1614, loss 0.159705, acc 0.9375 2019-09-25T17:03:58.260134: step 1615, loss 0.249506, acc 0.921875 2019-09-25T17:03:58.383927: step 1616, loss 0.103608, acc 0.953125 2019-09-25T17:03:58.506424: step 1617, loss 0.13274, acc 0.953125 2019-09-25T17:03:58.634732: step 1618, loss 0.106602, acc 0.984375 2019-09-25T17:03:58.752853: step 1619, loss 0.204922, acc 0.90625 2019-09-25T17:03:58.873309: step 1620, loss 0.103503, acc 0.96875 2019-09-25T17:03:58.994116: step 1621, loss 0.356509, acc 0.875 2019-09-25T17:03:59.115614: step 1622, loss 0.162717, acc 0.953125 2019-09-25T17:03:59.235152: step 1623, loss 0.132871, acc 0.9375 2019-09-25T17:03:59.357806: step 1624, loss 0.184358, acc 0.921875 2019-09-25T17:03:59.478561: step 1625, loss 0.161409, acc 0.9375 2019-09-25T17:03:59.597358: step 1626, loss 0.0998186, acc 0.953125 2019-09-25T17:03:59.728910: step 1627, loss 0.161979, acc 0.953125 2019-09-25T17:03:59.867075: step 1628, loss 0.172996, acc 0.953125 2019-09-25T17:03:59.990781: step 1629, loss 0.0916478, acc 0.96875 2019-09-25T17:04:00.110759: step 1630, loss 0.210776, acc 0.90625 2019-09-25T17:04:00.230343: step 1631, loss 0.147315, acc 0.9375 2019-09-25T17:04:00.351933: step 1632, loss 0.237752, acc 0.890625 2019-09-25T17:04:00.475635: step 1633, loss 0.144156, acc 0.953125 2019-09-25T17:04:00.596910: step 1634, loss 0.134377, acc 0.953125 2019-09-25T17:04:00.724431: step 1635, loss 0.0933097, acc 0.96875 2019-09-25T17:04:00.846924: step 1636, loss 0.166141, acc 0.953125 2019-09-25T17:04:00.974434: step 1637, loss 0.150086, acc 0.953125 2019-09-25T17:04:01.096072: step 1638, loss 0.212096, acc 0.890625 2019-09-25T17:04:01.217712: step 1639, loss 0.219308, acc 0.9375 2019-09-25T17:04:01.338916: step 1640, loss 0.177056, acc 0.921875 2019-09-25T17:04:01.460680: step 1641, loss 0.13256, acc 0.96875 2019-09-25T17:04:01.582248: step 1642, loss 0.137414, acc 0.9375 2019-09-25T17:04:01.703071: step 1643, loss 0.225411, acc 0.875 2019-09-25T17:04:01.835919: step 1644, loss 0.174418, acc 0.9375 2019-09-25T17:04:01.955080: step 1645, loss 0.148306, acc 0.9375 2019-09-25T17:04:02.077836: step 1646, loss 0.192527, acc 0.953125 2019-09-25T17:04:02.200468: step 1647, loss 0.122191, acc 0.953125 2019-09-25T17:04:02.321616: step 1648, loss 0.192725, acc 0.953125 2019-09-25T17:04:02.444454: step 1649, loss 0.178463, acc 0.921875 2019-09-25T17:04:02.559215: step 1650, loss 0.0895142, acc 0.95 2019-09-25T17:04:02.681144: step 1651, loss 0.226019, acc 0.875 2019-09-25T17:04:02.809574: step 1652, loss 0.155046, acc 0.9375 2019-09-25T17:04:02.931653: step 1653, loss 0.136911, acc 0.9375 2019-09-25T17:04:03.051141: step 1654, loss 0.0656125, acc 0.984375 2019-09-25T17:04:03.171409: step 1655, loss 
0.163172, acc 0.9375 2019-09-25T17:04:03.293179: step 1656, loss 0.177986, acc 0.9375 2019-09-25T17:04:03.415553: step 1657, loss 0.0913116, acc 0.953125 2019-09-25T17:04:03.534759: step 1658, loss 0.0390047, acc 1 2019-09-25T17:04:03.652644: step 1659, loss 0.123908, acc 0.9375 2019-09-25T17:04:03.773110: step 1660, loss 0.083944, acc 0.953125 2019-09-25T17:04:03.908325: step 1661, loss 0.0643999, acc 1 2019-09-25T17:04:04.028464: step 1662, loss 0.125596, acc 0.984375 2019-09-25T17:04:04.149971: step 1663, loss 0.0684675, acc 0.96875 2019-09-25T17:04:04.271715: step 1664, loss 0.12124, acc 0.984375 2019-09-25T17:04:04.395059: step 1665, loss 0.0633051, acc 0.984375 2019-09-25T17:04:04.513159: step 1666, loss 0.152265, acc 0.96875 2019-09-25T17:04:04.633450: step 1667, loss 0.0749862, acc 0.984375 2019-09-25T17:04:04.752854: step 1668, loss 0.202173, acc 0.890625 2019-09-25T17:04:04.881174: step 1669, loss 0.0880858, acc 0.953125 2019-09-25T17:04:05.002353: step 1670, loss 0.168204, acc 0.921875 2019-09-25T17:04:05.121039: step 1671, loss 0.0749672, acc 0.96875 2019-09-25T17:04:05.242213: step 1672, loss 0.128941, acc 0.953125 2019-09-25T17:04:05.366575: step 1673, loss 0.215108, acc 0.90625 2019-09-25T17:04:05.487236: step 1674, loss 0.0987374, acc 0.953125 2019-09-25T17:04:05.607929: step 1675, loss 0.090508, acc 0.984375 2019-09-25T17:04:05.730734: step 1676, loss 0.102878, acc 0.953125 2019-09-25T17:04:05.851112: step 1677, loss 0.0937995, acc 0.984375 2019-09-25T17:04:05.984173: step 1678, loss 0.165599, acc 0.921875 2019-09-25T17:04:06.102208: step 1679, loss 0.0844599, acc 0.96875 2019-09-25T17:04:06.225935: step 1680, loss 0.0820809, acc 1 2019-09-25T17:04:06.350510: step 1681, loss 0.101783, acc 0.96875 2019-09-25T17:04:06.470256: step 1682, loss 0.0766177, acc 0.984375 2019-09-25T17:04:06.589453: step 1683, loss 0.0568384, acc 0.984375 2019-09-25T17:04:06.712851: step 1684, loss 0.0827146, acc 0.96875 2019-09-25T17:04:06.835558: step 1685, loss 0.169421, acc 0.953125 2019-09-25T17:04:06.967189: step 1686, loss 0.0629564, acc 1 2019-09-25T17:04:07.086199: step 1687, loss 0.0874547, acc 0.953125 2019-09-25T17:04:07.206067: step 1688, loss 0.160394, acc 0.921875 2019-09-25T17:04:07.331126: step 1689, loss 0.0599889, acc 0.984375 2019-09-25T17:04:07.455364: step 1690, loss 0.0678788, acc 0.96875 2019-09-25T17:04:07.575465: step 1691, loss 0.0907027, acc 0.96875 2019-09-25T17:04:07.693984: step 1692, loss 0.0834859, acc 0.96875 2019-09-25T17:04:07.813676: step 1693, loss 0.0764491, acc 0.96875 2019-09-25T17:04:07.933445: step 1694, loss 0.181195, acc 0.921875 2019-09-25T17:04:08.066408: step 1695, loss 0.0849935, acc 0.96875 2019-09-25T17:04:08.188581: step 1696, loss 0.12228, acc 0.953125 2019-09-25T17:04:08.309731: step 1697, loss 0.0972411, acc 0.953125 2019-09-25T17:04:08.434669: step 1698, loss 0.118152, acc 0.953125 2019-09-25T17:04:08.554424: step 1699, loss 0.103623, acc 0.96875 2019-09-25T17:04:08.674818: step 1700, loss 0.135704, acc 0.921875 Evaluation: 2019-09-25T17:04:08.874898: step 1700, loss 0.680642, acc 0.74015 Saved model checkpoint to /kaggle/working/runs/1569430829/checkpoints/model-1700 2019-09-25T17:04:09.138645: step 1701, loss 0.127634, acc 0.953125 2019-09-25T17:04:09.259045: step 1702, loss 0.117096, acc 0.953125 2019-09-25T17:04:09.382972: step 1703, loss 0.137057, acc 0.9375 2019-09-25T17:04:09.502690: step 1704, loss 0.0989036, acc 0.96875 2019-09-25T17:04:09.623125: step 1705, loss 0.182134, acc 0.921875 2019-09-25T17:04:09.742478: step 1706, loss 
0.165182, acc 0.90625 2019-09-25T17:04:09.863181: step 1707, loss 0.149779, acc 0.921875 2019-09-25T17:04:09.981679: step 1708, loss 0.132903, acc 0.953125 2019-09-25T17:04:10.113286: step 1709, loss 0.114042, acc 0.96875 2019-09-25T17:04:10.233671: step 1710, loss 0.15688, acc 0.9375 2019-09-25T17:04:10.355895: step 1711, loss 0.0578376, acc 0.984375 2019-09-25T17:04:10.476635: step 1712, loss 0.0494443, acc 0.984375 2019-09-25T17:04:10.597552: step 1713, loss 0.195097, acc 0.90625 2019-09-25T17:04:10.716750: step 1714, loss 0.233459, acc 0.890625 2019-09-25T17:04:10.839264: step 1715, loss 0.10055, acc 0.953125 2019-09-25T17:04:10.958490: step 1716, loss 0.156155, acc 0.9375 2019-09-25T17:04:11.081291: step 1717, loss 0.156086, acc 0.953125 2019-09-25T17:04:11.203798: step 1718, loss 0.0279584, acc 1 2019-09-25T17:04:11.328082: step 1719, loss 0.0959557, acc 0.96875 2019-09-25T17:04:11.447739: step 1720, loss 0.162418, acc 0.953125 2019-09-25T17:04:11.567115: step 1721, loss 0.0721378, acc 0.96875 2019-09-25T17:04:11.686906: step 1722, loss 0.10973, acc 0.921875 2019-09-25T17:04:11.810577: step 1723, loss 0.0906031, acc 0.96875 2019-09-25T17:04:11.931489: step 1724, loss 0.121053, acc 0.953125 2019-09-25T17:04:12.052140: step 1725, loss 0.121824, acc 0.9375 2019-09-25T17:04:12.184078: step 1726, loss 0.0616777, acc 0.984375 2019-09-25T17:04:12.304975: step 1727, loss 0.199884, acc 0.9375 2019-09-25T17:04:12.427025: step 1728, loss 0.113985, acc 0.9375 2019-09-25T17:04:12.549650: step 1729, loss 0.0706122, acc 1 2019-09-25T17:04:12.666875: step 1730, loss 0.130105, acc 0.953125 2019-09-25T17:04:12.784098: step 1731, loss 0.0634708, acc 0.984375 2019-09-25T17:04:12.904399: step 1732, loss 0.127977, acc 0.96875 2019-09-25T17:04:13.023792: step 1733, loss 0.11468, acc 0.96875 2019-09-25T17:04:13.145432: step 1734, loss 0.0829672, acc 0.96875 2019-09-25T17:04:13.272643: step 1735, loss 0.101082, acc 0.984375 2019-09-25T17:04:13.418748: step 1736, loss 0.134483, acc 0.9375 2019-09-25T17:04:13.540388: step 1737, loss 0.0884049, acc 0.984375 2019-09-25T17:04:13.661168: step 1738, loss 0.0903894, acc 0.96875 2019-09-25T17:04:13.789803: step 1739, loss 0.181341, acc 0.921875 2019-09-25T17:04:13.909922: step 1740, loss 0.111705, acc 0.96875 2019-09-25T17:04:14.034891: step 1741, loss 0.0505235, acc 1 2019-09-25T17:04:14.155814: step 1742, loss 0.0727859, acc 1 2019-09-25T17:04:14.290439: step 1743, loss 0.14097, acc 0.9375 2019-09-25T17:04:14.414951: step 1744, loss 0.0830626, acc 0.96875 2019-09-25T17:04:14.535365: step 1745, loss 0.0800816, acc 0.96875 2019-09-25T17:04:14.656387: step 1746, loss 0.127388, acc 0.953125 2019-09-25T17:04:14.780192: step 1747, loss 0.04767, acc 0.96875 2019-09-25T17:04:14.903416: step 1748, loss 0.134965, acc 0.9375 2019-09-25T17:04:15.024742: step 1749, loss 0.135787, acc 0.9375 2019-09-25T17:04:15.149599: step 1750, loss 0.0950153, acc 0.96875 2019-09-25T17:04:15.272352: step 1751, loss 0.17954, acc 0.921875 2019-09-25T17:04:15.398044: step 1752, loss 0.186145, acc 0.90625 2019-09-25T17:04:15.521411: step 1753, loss 0.142575, acc 0.953125 2019-09-25T17:04:15.642877: step 1754, loss 0.0975715, acc 0.96875 2019-09-25T17:04:15.766302: step 1755, loss 0.169742, acc 0.921875 2019-09-25T17:04:15.886517: step 1756, loss 0.0720428, acc 0.96875 2019-09-25T17:04:16.005135: step 1757, loss 0.17177, acc 0.9375 2019-09-25T17:04:16.125836: step 1758, loss 0.0720384, acc 0.984375 2019-09-25T17:04:16.243778: step 1759, loss 0.135165, acc 0.953125 2019-09-25T17:04:16.380914: step 
1760, loss 0.0345472, acc 0.984375 2019-09-25T17:04:16.502138: step 1761, loss 0.0640228, acc 0.984375 2019-09-25T17:04:16.624213: step 1762, loss 0.0975421, acc 0.953125 2019-09-25T17:04:16.746422: step 1763, loss 0.129078, acc 0.9375 2019-09-25T17:04:16.869730: step 1764, loss 0.0866688, acc 0.953125 2019-09-25T17:04:16.988989: step 1765, loss 0.112757, acc 0.953125 2019-09-25T17:04:17.109256: step 1766, loss 0.0754264, acc 0.953125 2019-09-25T17:04:17.228908: step 1767, loss 0.0725423, acc 0.96875 2019-09-25T17:04:17.355081: step 1768, loss 0.0577028, acc 0.984375 2019-09-25T17:04:17.474960: step 1769, loss 0.151518, acc 0.921875 2019-09-25T17:04:17.593492: step 1770, loss 0.0517627, acc 0.984375 2019-09-25T17:04:17.712724: step 1771, loss 0.172768, acc 0.953125 2019-09-25T17:04:17.835257: step 1772, loss 0.220276, acc 0.90625 2019-09-25T17:04:17.954766: step 1773, loss 0.13192, acc 0.9375 2019-09-25T17:04:18.073564: step 1774, loss 0.223871, acc 0.890625 2019-09-25T17:04:18.194213: step 1775, loss 0.0784567, acc 0.96875 2019-09-25T17:04:18.315378: step 1776, loss 0.106866, acc 0.96875 2019-09-25T17:04:18.450361: step 1777, loss 0.0945089, acc 0.953125 2019-09-25T17:04:18.571655: step 1778, loss 0.0717622, acc 0.96875 2019-09-25T17:04:18.691233: step 1779, loss 0.139484, acc 0.90625 2019-09-25T17:04:18.811006: step 1780, loss 0.138184, acc 0.9375 2019-09-25T17:04:18.930073: step 1781, loss 0.113492, acc 0.96875 2019-09-25T17:04:19.050352: step 1782, loss 0.166926, acc 0.921875 2019-09-25T17:04:19.172624: step 1783, loss 0.129462, acc 0.953125 2019-09-25T17:04:19.296957: step 1784, loss 0.111945, acc 0.96875 2019-09-25T17:04:19.426308: step 1785, loss 0.0893495, acc 0.96875 2019-09-25T17:04:19.546035: step 1786, loss 0.0440597, acc 0.984375 2019-09-25T17:04:19.666740: step 1787, loss 0.155967, acc 0.921875 2019-09-25T17:04:19.791183: step 1788, loss 0.096215, acc 0.96875 2019-09-25T17:04:19.912388: step 1789, loss 0.136031, acc 0.953125 2019-09-25T17:04:20.032619: step 1790, loss 0.0507273, acc 1 2019-09-25T17:04:20.151110: step 1791, loss 0.0691628, acc 0.984375 2019-09-25T17:04:20.270956: step 1792, loss 0.136732, acc 0.96875 2019-09-25T17:04:20.391969: step 1793, loss 0.14216, acc 0.953125 2019-09-25T17:04:20.526150: step 1794, loss 0.202185, acc 0.921875 2019-09-25T17:04:20.645758: step 1795, loss 0.110128, acc 0.953125 2019-09-25T17:04:20.764887: step 1796, loss 0.0715695, acc 1 2019-09-25T17:04:20.885968: step 1797, loss 0.075945, acc 0.953125 2019-09-25T17:04:21.003622: step 1798, loss 0.209722, acc 0.921875 2019-09-25T17:04:21.123371: step 1799, loss 0.174097, acc 0.9375 2019-09-25T17:04:21.240376: step 1800, loss 0.151472, acc 0.916667 Evaluation: 2019-09-25T17:04:21.454594: step 1800, loss 0.711517, acc 0.737336 Saved model checkpoint to /kaggle/working/runs/1569430829/checkpoints/model-1800 2019-09-25T17:04:21.694705: step 1801, loss 0.0618235, acc 1 2019-09-25T17:04:21.825395: step 1802, loss 0.0473705, acc 0.984375 2019-09-25T17:04:21.970602: step 1803, loss 0.108605, acc 0.984375 2019-09-25T17:04:22.090000: step 1804, loss 0.0392809, acc 1 2019-09-25T17:04:22.208683: step 1805, loss 0.0831342, acc 0.96875 2019-09-25T17:04:22.329274: step 1806, loss 0.0482354, acc 1 2019-09-25T17:04:22.451167: step 1807, loss 0.0383952, acc 0.984375 2019-09-25T17:04:22.585399: step 1808, loss 0.116289, acc 0.96875 2019-09-25T17:04:22.705481: step 1809, loss 0.111145, acc 0.953125 2019-09-25T17:04:22.827640: step 1810, loss 0.155079, acc 0.921875 2019-09-25T17:04:22.949101: step 1811, loss 
0.0389889, acc 0.984375 2019-09-25T17:04:23.069095: step 1812, loss 0.0772047, acc 0.984375 2019-09-25T17:04:23.188040: step 1813, loss 0.0581682, acc 0.96875 2019-09-25T17:04:23.306029: step 1814, loss 0.141594, acc 0.953125 2019-09-25T17:04:23.428540: step 1815, loss 0.0310214, acc 1 2019-09-25T17:04:23.578186: step 1816, loss 0.078623, acc 0.96875 2019-09-25T17:04:23.700082: step 1817, loss 0.0718994, acc 0.953125 2019-09-25T17:04:23.820434: step 1818, loss 0.15456, acc 0.921875 2019-09-25T17:04:23.940993: step 1819, loss 0.0831238, acc 0.953125 2019-09-25T17:04:24.062600: step 1820, loss 0.0975797, acc 0.953125 2019-09-25T17:04:24.184337: step 1821, loss 0.0591241, acc 0.984375 2019-09-25T17:04:24.305374: step 1822, loss 0.100121, acc 0.9375 2019-09-25T17:04:24.426865: step 1823, loss 0.0494974, acc 0.984375 2019-09-25T17:04:24.547502: step 1824, loss 0.115665, acc 0.9375 2019-09-25T17:04:24.680730: step 1825, loss 0.0851762, acc 0.984375 2019-09-25T17:04:24.801560: step 1826, loss 0.043084, acc 0.984375 2019-09-25T17:04:24.921515: step 1827, loss 0.0526426, acc 1 2019-09-25T17:04:25.041473: step 1828, loss 0.114666, acc 0.953125 2019-09-25T17:04:25.162673: step 1829, loss 0.0992108, acc 0.953125 2019-09-25T17:04:25.284666: step 1830, loss 0.0825998, acc 0.96875 2019-09-25T17:04:25.406664: step 1831, loss 0.0663555, acc 0.96875 2019-09-25T17:04:25.532786: step 1832, loss 0.0973903, acc 0.953125 2019-09-25T17:04:25.656380: step 1833, loss 0.0483035, acc 1 2019-09-25T17:04:25.779504: step 1834, loss 0.106249, acc 0.9375 2019-09-25T17:04:25.900993: step 1835, loss 0.16397, acc 0.9375 2019-09-25T17:04:26.023019: step 1836, loss 0.0734814, acc 0.96875 2019-09-25T17:04:26.145271: step 1837, loss 0.149991, acc 0.9375 2019-09-25T17:04:26.265700: step 1838, loss 0.111659, acc 0.984375 2019-09-25T17:04:26.388184: step 1839, loss 0.092175, acc 0.953125 2019-09-25T17:04:26.509749: step 1840, loss 0.0748242, acc 0.984375 2019-09-25T17:04:26.630177: step 1841, loss 0.140026, acc 0.953125 2019-09-25T17:04:26.764496: step 1842, loss 0.0657529, acc 0.984375 2019-09-25T17:04:26.884621: step 1843, loss 0.0954285, acc 0.96875 2019-09-25T17:04:27.005338: step 1844, loss 0.146172, acc 0.921875 2019-09-25T17:04:27.126032: step 1845, loss 0.13098, acc 0.96875 2019-09-25T17:04:27.246545: step 1846, loss 0.092255, acc 0.96875 2019-09-25T17:04:27.369660: step 1847, loss 0.182725, acc 0.984375 2019-09-25T17:04:27.489188: step 1848, loss 0.0524682, acc 1 2019-09-25T17:04:27.608235: step 1849, loss 0.104614, acc 0.96875 2019-09-25T17:04:27.741556: step 1850, loss 0.0405202, acc 1 2019-09-25T17:04:27.863687: step 1851, loss 0.0417019, acc 1 2019-09-25T17:04:27.985438: step 1852, loss 0.222117, acc 0.875 2019-09-25T17:04:28.108603: step 1853, loss 0.118552, acc 0.96875 2019-09-25T17:04:28.228805: step 1854, loss 0.0842388, acc 0.984375 2019-09-25T17:04:28.351572: step 1855, loss 0.0695188, acc 0.984375 2019-09-25T17:04:28.471181: step 1856, loss 0.0734805, acc 0.96875 2019-09-25T17:04:28.594133: step 1857, loss 0.230785, acc 0.921875 2019-09-25T17:04:28.715341: step 1858, loss 0.107803, acc 0.96875 2019-09-25T17:04:28.850001: step 1859, loss 0.0548717, acc 0.984375 2019-09-25T17:04:28.970213: step 1860, loss 0.0639511, acc 0.96875 2019-09-25T17:04:29.092618: step 1861, loss 0.0959749, acc 0.96875 2019-09-25T17:04:29.211591: step 1862, loss 0.023002, acc 1 2019-09-25T17:04:29.334911: step 1863, loss 0.0964551, acc 0.9375 2019-09-25T17:04:29.459148: step 1864, loss 0.0807147, acc 0.9375 2019-09-25T17:04:29.582540: step 
1865, loss 0.098072, acc 0.953125 2019-09-25T17:04:29.701844: step 1866, loss 0.0754421, acc 0.953125 2019-09-25T17:04:29.840657: step 1867, loss 0.0571009, acc 0.984375 2019-09-25T17:04:29.962825: step 1868, loss 0.0513164, acc 0.984375 2019-09-25T17:04:30.082561: step 1869, loss 0.0410084, acc 1 2019-09-25T17:04:30.201567: step 1870, loss 0.0131209, acc 1 2019-09-25T17:04:30.322939: step 1871, loss 0.15735, acc 0.953125 2019-09-25T17:04:30.442765: step 1872, loss 0.129706, acc 0.96875 2019-09-25T17:04:30.561209: step 1873, loss 0.106375, acc 0.96875 2019-09-25T17:04:30.681994: step 1874, loss 0.225695, acc 0.921875 2019-09-25T17:04:30.801246: step 1875, loss 0.0595145, acc 0.984375 2019-09-25T17:04:30.933846: step 1876, loss 0.184783, acc 0.921875 2019-09-25T17:04:31.054755: step 1877, loss 0.0978806, acc 0.9375 2019-09-25T17:04:31.174175: step 1878, loss 0.118594, acc 0.9375 2019-09-25T17:04:31.296384: step 1879, loss 0.0711252, acc 0.96875 2019-09-25T17:04:31.423073: step 1880, loss 0.187362, acc 0.953125 2019-09-25T17:04:31.543162: step 1881, loss 0.0648599, acc 0.96875 2019-09-25T17:04:31.662825: step 1882, loss 0.05258, acc 0.984375 2019-09-25T17:04:31.784108: step 1883, loss 0.0851086, acc 0.96875 2019-09-25T17:04:31.914517: step 1884, loss 0.165148, acc 0.890625 2019-09-25T17:04:32.038847: step 1885, loss 0.192524, acc 0.921875 2019-09-25T17:04:32.161737: step 1886, loss 0.120687, acc 0.96875 2019-09-25T17:04:32.283578: step 1887, loss 0.12136, acc 0.96875 2019-09-25T17:04:32.405865: step 1888, loss 0.0752411, acc 0.96875 2019-09-25T17:04:32.528142: step 1889, loss 0.0880247, acc 0.953125 2019-09-25T17:04:32.649367: step 1890, loss 0.138174, acc 0.953125 2019-09-25T17:04:32.770186: step 1891, loss 0.100627, acc 0.921875 2019-09-25T17:04:32.892147: step 1892, loss 0.0942398, acc 0.953125 2019-09-25T17:04:33.022700: step 1893, loss 0.147666, acc 0.953125 2019-09-25T17:04:33.143608: step 1894, loss 0.116303, acc 0.953125 2019-09-25T17:04:33.265683: step 1895, loss 0.101845, acc 0.96875 2019-09-25T17:04:33.388629: step 1896, loss 0.14969, acc 0.9375 2019-09-25T17:04:33.512963: step 1897, loss 0.0792431, acc 0.953125 2019-09-25T17:04:33.632250: step 1898, loss 0.0769289, acc 0.984375 2019-09-25T17:04:33.754614: step 1899, loss 0.179072, acc 0.953125 2019-09-25T17:04:33.879601: step 1900, loss 0.0383862, acc 1 Evaluation: 2019-09-25T17:04:34.092560: step 1900, loss 0.717449, acc 0.732645 Saved model checkpoint to /kaggle/working/runs/1569430829/checkpoints/model-1900 2019-09-25T17:04:34.357374: step 1901, loss 0.0345728, acc 0.984375 2019-09-25T17:04:34.477436: step 1902, loss 0.0630673, acc 0.96875 2019-09-25T17:04:34.596728: step 1903, loss 0.105497, acc 0.984375 2019-09-25T17:04:34.716621: step 1904, loss 0.0552965, acc 1 2019-09-25T17:04:34.836107: step 1905, loss 0.0533252, acc 0.984375 2019-09-25T17:04:34.954557: step 1906, loss 0.0771335, acc 0.96875 2019-09-25T17:04:35.086571: step 1907, loss 0.0670567, acc 0.984375 2019-09-25T17:04:35.205488: step 1908, loss 0.101177, acc 0.9375 2019-09-25T17:04:35.324162: step 1909, loss 0.0789257, acc 0.96875 2019-09-25T17:04:35.444452: step 1910, loss 0.123657, acc 0.96875 2019-09-25T17:04:35.563013: step 1911, loss 0.0289323, acc 1 2019-09-25T17:04:35.682766: step 1912, loss 0.113362, acc 0.953125 2019-09-25T17:04:35.800886: step 1913, loss 0.130443, acc 0.984375 2019-09-25T17:04:35.918987: step 1914, loss 0.060822, acc 0.984375 2019-09-25T17:04:36.040384: step 1915, loss 0.0479381, acc 0.984375 2019-09-25T17:04:36.169580: step 1916, loss 
0.0472799, acc 1 2019-09-25T17:04:36.290472: step 1917, loss 0.0523768, acc 0.984375 2019-09-25T17:04:36.411665: step 1918, loss 0.135768, acc 0.953125 2019-09-25T17:04:36.532160: step 1919, loss 0.0343207, acc 1 2019-09-25T17:04:36.659600: step 1920, loss 0.059053, acc 0.984375 2019-09-25T17:04:36.779341: step 1921, loss 0.110878, acc 0.953125 2019-09-25T17:04:36.897026: step 1922, loss 0.0883889, acc 0.953125 2019-09-25T17:04:37.037327: step 1923, loss 0.133249, acc 0.90625 2019-09-25T17:04:37.240236: step 1924, loss 0.0807575, acc 0.96875 2019-09-25T17:04:37.508492: step 1925, loss 0.0909166, acc 0.96875 2019-09-25T17:04:37.661345: step 1926, loss 0.0658913, acc 1 2019-09-25T17:04:37.817067: step 1927, loss 0.0603007, acc 0.984375 2019-09-25T17:04:37.964380: step 1928, loss 0.0452418, acc 0.984375 2019-09-25T17:04:38.100459: step 1929, loss 0.131991, acc 0.9375 2019-09-25T17:04:38.231436: step 1930, loss 0.131794, acc 0.9375 2019-09-25T17:04:38.373784: step 1931, loss 0.16866, acc 0.953125 2019-09-25T17:04:38.493112: step 1932, loss 0.0833936, acc 0.953125 2019-09-25T17:04:38.613341: step 1933, loss 0.110372, acc 0.953125 2019-09-25T17:04:38.758357: step 1934, loss 0.0794755, acc 0.96875 2019-09-25T17:04:38.880240: step 1935, loss 0.0623235, acc 0.984375 2019-09-25T17:04:39.000892: step 1936, loss 0.0686581, acc 0.96875 2019-09-25T17:04:39.122700: step 1937, loss 0.197549, acc 0.90625 2019-09-25T17:04:39.247447: step 1938, loss 0.0818989, acc 0.953125 2019-09-25T17:04:39.375278: step 1939, loss 0.14754, acc 0.953125 2019-09-25T17:04:39.497251: step 1940, loss 0.0506145, acc 0.984375 2019-09-25T17:04:39.616859: step 1941, loss 0.0974041, acc 0.96875 2019-09-25T17:04:39.739124: step 1942, loss 0.104322, acc 0.953125 2019-09-25T17:04:39.861366: step 1943, loss 0.021893, acc 1 2019-09-25T17:04:39.982351: step 1944, loss 0.0439897, acc 0.984375 2019-09-25T17:04:40.100909: step 1945, loss 0.158992, acc 0.921875 2019-09-25T17:04:40.220817: step 1946, loss 0.0932671, acc 0.984375 2019-09-25T17:04:40.352079: step 1947, loss 0.0489018, acc 0.984375 2019-09-25T17:04:40.474321: step 1948, loss 0.034636, acc 1 2019-09-25T17:04:40.593937: step 1949, loss 0.101089, acc 0.953125 2019-09-25T17:04:40.709040: step 1950, loss 0.10435, acc 0.95 2019-09-25T17:04:40.833291: step 1951, loss 0.0394587, acc 0.984375 2019-09-25T17:04:40.955420: step 1952, loss 0.0379741, acc 1 2019-09-25T17:04:41.078302: step 1953, loss 0.0627148, acc 0.984375 2019-09-25T17:04:41.200990: step 1954, loss 0.0123707, acc 1 2019-09-25T17:04:41.331173: step 1955, loss 0.0251095, acc 1 2019-09-25T17:04:41.450484: step 1956, loss 0.0466471, acc 1 2019-09-25T17:04:41.573957: step 1957, loss 0.0412254, acc 1 2019-09-25T17:04:41.694365: step 1958, loss 0.105434, acc 0.953125 2019-09-25T17:04:41.815913: step 1959, loss 0.0619572, acc 0.96875 2019-09-25T17:04:41.936939: step 1960, loss 0.131806, acc 0.921875 2019-09-25T17:04:42.057804: step 1961, loss 0.0676095, acc 0.984375 2019-09-25T17:04:42.177399: step 1962, loss 0.0670905, acc 0.984375 2019-09-25T17:04:42.301587: step 1963, loss 0.0994897, acc 0.96875 2019-09-25T17:04:42.436677: step 1964, loss 0.0981976, acc 0.953125 2019-09-25T17:04:42.556627: step 1965, loss 0.0575784, acc 1 2019-09-25T17:04:42.677111: step 1966, loss 0.0365684, acc 0.984375 2019-09-25T17:04:42.798683: step 1967, loss 0.0963244, acc 0.96875 2019-09-25T17:04:42.923656: step 1968, loss 0.0299137, acc 0.984375 2019-09-25T17:04:43.047454: step 1969, loss 0.0443574, acc 0.984375 2019-09-25T17:04:43.166882: step 1970, 
loss 0.0784662, acc 0.984375 2019-09-25T17:04:43.286148: step 1971, loss 0.0725279, acc 0.96875 2019-09-25T17:04:43.412382: step 1972, loss 0.0359709, acc 1 2019-09-25T17:04:43.533186: step 1973, loss 0.110441, acc 0.96875 2019-09-25T17:04:43.652508: step 1974, loss 0.0403061, acc 0.96875 2019-09-25T17:04:43.774704: step 1975, loss 0.0493745, acc 0.984375 2019-09-25T17:04:43.899088: step 1976, loss 0.0320638, acc 1 2019-09-25T17:04:44.023406: step 1977, loss 0.117898, acc 0.953125 2019-09-25T17:04:44.150025: step 1978, loss 0.0393737, acc 1 2019-09-25T17:04:44.269117: step 1979, loss 0.108853, acc 0.953125 2019-09-25T17:04:44.390366: step 1980, loss 0.11519, acc 0.96875 2019-09-25T17:04:44.522728: step 1981, loss 0.0856339, acc 0.96875 2019-09-25T17:04:44.643918: step 1982, loss 0.0412275, acc 1 2019-09-25T17:04:44.762595: step 1983, loss 0.0732059, acc 0.96875 2019-09-25T17:04:44.881861: step 1984, loss 0.102121, acc 0.96875 2019-09-25T17:04:45.000362: step 1985, loss 0.0674838, acc 0.984375 2019-09-25T17:04:45.119207: step 1986, loss 0.0558312, acc 0.984375 2019-09-25T17:04:45.240166: step 1987, loss 0.0378081, acc 0.984375 2019-09-25T17:04:45.364133: step 1988, loss 0.0448392, acc 0.984375 2019-09-25T17:04:45.494822: step 1989, loss 0.0541651, acc 0.984375 2019-09-25T17:04:45.612355: step 1990, loss 0.0725526, acc 0.984375 2019-09-25T17:04:45.732471: step 1991, loss 0.0884083, acc 0.96875 2019-09-25T17:04:45.855233: step 1992, loss 0.0171613, acc 1 2019-09-25T17:04:45.977124: step 1993, loss 0.105749, acc 0.96875 2019-09-25T17:04:46.098863: step 1994, loss 0.0525691, acc 0.984375 2019-09-25T17:04:46.219158: step 1995, loss 0.138861, acc 0.953125 2019-09-25T17:04:46.342064: step 1996, loss 0.0291482, acc 0.984375 2019-09-25T17:04:46.461999: step 1997, loss 0.0517282, acc 0.96875 2019-09-25T17:04:46.600240: step 1998, loss 0.0550505, acc 0.984375 2019-09-25T17:04:46.723819: step 1999, loss 0.0378541, acc 0.96875 2019-09-25T17:04:46.845054: step 2000, loss 0.0497283, acc 0.984375 Evaluation: 2019-09-25T17:04:47.056736: step 2000, loss 0.769498, acc 0.727017 Saved model checkpoint to /kaggle/working/runs/1569430829/checkpoints/model-2000 2019-09-25T17:04:47.289304: step 2001, loss 0.0870964, acc 0.96875 2019-09-25T17:04:47.412434: step 2002, loss 0.0939161, acc 0.953125 2019-09-25T17:04:47.534461: step 2003, loss 0.0450351, acc 0.984375 2019-09-25T17:04:47.659055: step 2004, loss 0.105083, acc 0.9375 2019-09-25T17:04:47.780952: step 2005, loss 0.0529068, acc 0.984375 2019-09-25T17:04:47.901753: step 2006, loss 0.0858679, acc 0.96875 2019-09-25T17:04:48.022781: step 2007, loss 0.0823922, acc 0.953125 2019-09-25T17:04:48.144293: step 2008, loss 0.0839011, acc 0.984375 2019-09-25T17:04:48.264569: step 2009, loss 0.184349, acc 0.921875 2019-09-25T17:04:48.385919: step 2010, loss 0.0528471, acc 0.984375 2019-09-25T17:04:48.505892: step 2011, loss 0.0575553, acc 0.96875 2019-09-25T17:04:48.634647: step 2012, loss 0.0624558, acc 0.984375 2019-09-25T17:04:48.759562: step 2013, loss 0.0522466, acc 0.984375 2019-09-25T17:04:48.905748: step 2014, loss 0.0541708, acc 1 2019-09-25T17:04:49.025605: step 2015, loss 0.0495926, acc 0.984375 2019-09-25T17:04:49.146499: step 2016, loss 0.0759337, acc 0.96875 2019-09-25T17:04:49.266001: step 2017, loss 0.054965, acc 0.984375 2019-09-25T17:04:49.393787: step 2018, loss 0.060808, acc 0.984375 2019-09-25T17:04:49.514473: step 2019, loss 0.0890895, acc 0.96875 2019-09-25T17:04:49.639959: step 2020, loss 0.0944456, acc 0.96875 2019-09-25T17:04:49.760875: step 2021, 
loss 0.0655479, acc 0.96875 2019-09-25T17:04:49.882031: step 2022, loss 0.0316936, acc 1 2019-09-25T17:04:50.002652: step 2023, loss 0.102705, acc 0.96875 2019-09-25T17:04:50.126663: step 2024, loss 0.0239387, acc 1 2019-09-25T17:04:50.247783: step 2025, loss 0.0281385, acc 1 2019-09-25T17:04:50.367996: step 2026, loss 0.0551563, acc 0.984375 2019-09-25T17:04:50.486590: step 2027, loss 0.0643688, acc 0.96875 2019-09-25T17:04:50.608359: step 2028, loss 0.097232, acc 0.9375 2019-09-25T17:04:50.740330: step 2029, loss 0.0642446, acc 0.953125 2019-09-25T17:04:50.861326: step 2030, loss 0.0586472, acc 0.96875 2019-09-25T17:04:50.981596: step 2031, loss 0.0272376, acc 1 2019-09-25T17:04:51.102068: step 2032, loss 0.0745028, acc 0.984375 2019-09-25T17:04:51.220928: step 2033, loss 0.0758368, acc 0.96875 2019-09-25T17:04:51.342712: step 2034, loss 0.111232, acc 0.953125 2019-09-25T17:04:51.462989: step 2035, loss 0.0550572, acc 1 2019-09-25T17:04:51.583438: step 2036, loss 0.0617448, acc 0.984375 2019-09-25T17:04:51.707613: step 2037, loss 0.145125, acc 0.921875 2019-09-25T17:04:51.830549: step 2038, loss 0.0739662, acc 0.96875 2019-09-25T17:04:51.951751: step 2039, loss 0.0685225, acc 0.984375 2019-09-25T17:04:52.071881: step 2040, loss 0.0326221, acc 1 2019-09-25T17:04:52.194852: step 2041, loss 0.149197, acc 0.90625 2019-09-25T17:04:52.315800: step 2042, loss 0.0353808, acc 1 2019-09-25T17:04:52.437374: step 2043, loss 0.144667, acc 0.9375 2019-09-25T17:04:52.556484: step 2044, loss 0.0620521, acc 0.96875 2019-09-25T17:04:52.676955: step 2045, loss 0.0446539, acc 0.984375 2019-09-25T17:04:52.811183: step 2046, loss 0.0664043, acc 1 2019-09-25T17:04:52.934027: step 2047, loss 0.0327003, acc 1 2019-09-25T17:04:53.055035: step 2048, loss 0.0562392, acc 0.984375 2019-09-25T17:04:53.175984: step 2049, loss 0.0856391, acc 0.96875 2019-09-25T17:04:53.295615: step 2050, loss 0.0580784, acc 0.96875 2019-09-25T17:04:53.418346: step 2051, loss 0.0764899, acc 0.96875 2019-09-25T17:04:53.539523: step 2052, loss 0.0700375, acc 0.96875 2019-09-25T17:04:53.657981: step 2053, loss 0.0844098, acc 0.984375 2019-09-25T17:04:53.781310: step 2054, loss 0.0128697, acc 1 2019-09-25T17:04:53.902302: step 2055, loss 0.10127, acc 0.953125 2019-09-25T17:04:54.021602: step 2056, loss 0.0727765, acc 0.984375 2019-09-25T17:04:54.142303: step 2057, loss 0.0942816, acc 0.96875 2019-09-25T17:04:54.262767: step 2058, loss 0.0678857, acc 0.984375 2019-09-25T17:04:54.389578: step 2059, loss 0.0717446, acc 0.984375 2019-09-25T17:04:54.508443: step 2060, loss 0.0647455, acc 0.96875 2019-09-25T17:04:54.627768: step 2061, loss 0.0920811, acc 0.96875 2019-09-25T17:04:54.748564: step 2062, loss 0.0558234, acc 0.96875 2019-09-25T17:04:54.881000: step 2063, loss 0.0542553, acc 0.984375 2019-09-25T17:04:54.999789: step 2064, loss 0.0422838, acc 0.984375 2019-09-25T17:04:55.118015: step 2065, loss 0.0397931, acc 1 2019-09-25T17:04:55.236607: step 2066, loss 0.171908, acc 0.9375 2019-09-25T17:04:55.361330: step 2067, loss 0.0944056, acc 0.953125 2019-09-25T17:04:55.482233: step 2068, loss 0.0954782, acc 0.953125 2019-09-25T17:04:55.600762: step 2069, loss 0.117388, acc 0.96875 2019-09-25T17:04:55.719440: step 2070, loss 0.0528369, acc 0.984375 2019-09-25T17:04:55.847495: step 2071, loss 0.0625991, acc 0.96875 2019-09-25T17:04:55.969702: step 2072, loss 0.0701636, acc 0.953125 2019-09-25T17:04:56.091194: step 2073, loss 0.195752, acc 0.921875 2019-09-25T17:04:56.210571: step 2074, loss 0.034413, acc 0.984375 2019-09-25T17:04:56.332582: step 
2075, loss 0.0448437, acc 0.984375 2019-09-25T17:04:56.456168: step 2076, loss 0.025536, acc 1 2019-09-25T17:04:56.581852: step 2077, loss 0.0401388, acc 1 2019-09-25T17:04:56.709289: step 2078, loss 0.143196, acc 0.921875 2019-09-25T17:04:56.832152: step 2079, loss 0.0919119, acc 0.953125 2019-09-25T17:04:56.964264: step 2080, loss 0.103171, acc 0.953125 2019-09-25T17:04:57.087146: step 2081, loss 0.0776785, acc 0.953125 2019-09-25T17:04:57.206783: step 2082, loss 0.161033, acc 0.953125 2019-09-25T17:04:57.327587: step 2083, loss 0.0362803, acc 0.984375 2019-09-25T17:04:57.449907: step 2084, loss 0.106374, acc 0.96875 2019-09-25T17:04:57.570808: step 2085, loss 0.143542, acc 0.953125 2019-09-25T17:04:57.690165: step 2086, loss 0.176107, acc 0.9375 2019-09-25T17:04:57.812229: step 2087, loss 0.0670019, acc 0.953125 2019-09-25T17:04:57.941045: step 2088, loss 0.119706, acc 0.953125 2019-09-25T17:04:58.062740: step 2089, loss 0.0356126, acc 0.984375 2019-09-25T17:04:58.182525: step 2090, loss 0.0351039, acc 1 2019-09-25T17:04:58.306475: step 2091, loss 0.0791547, acc 0.96875 2019-09-25T17:04:58.429769: step 2092, loss 0.0321191, acc 1 2019-09-25T17:04:58.553372: step 2093, loss 0.0920803, acc 0.953125 2019-09-25T17:04:58.674722: step 2094, loss 0.0751459, acc 0.984375 2019-09-25T17:04:58.797382: step 2095, loss 0.0466573, acc 0.984375 2019-09-25T17:04:58.920399: step 2096, loss 0.0619758, acc 0.953125 2019-09-25T17:04:59.048273: step 2097, loss 0.0803469, acc 0.984375 2019-09-25T17:04:59.169920: step 2098, loss 0.0796164, acc 0.96875 2019-09-25T17:04:59.293634: step 2099, loss 0.114674, acc 0.953125 2019-09-25T17:04:59.413679: step 2100, loss 0.0960028, acc 0.95 Evaluation: 2019-09-25T17:04:59.612828: step 2100, loss 0.787366, acc 0.739212 Saved model checkpoint to /kaggle/working/runs/1569430829/checkpoints/model-2100 2019-09-25T17:04:59.880897: step 2101, loss 0.0297317, acc 0.984375 2019-09-25T17:05:00.011639: step 2102, loss 0.0561712, acc 0.984375 2019-09-25T17:05:00.133773: step 2103, loss 0.0379765, acc 0.984375 2019-09-25T17:05:00.256276: step 2104, loss 0.0321496, acc 1 2019-09-25T17:05:00.381450: step 2105, loss 0.0318704, acc 1 2019-09-25T17:05:00.500362: step 2106, loss 0.0304243, acc 1 2019-09-25T17:05:00.619898: step 2107, loss 0.0154809, acc 1 2019-09-25T17:05:00.739084: step 2108, loss 0.0461526, acc 0.984375 2019-09-25T17:05:00.860745: step 2109, loss 0.0588596, acc 0.96875 2019-09-25T17:05:00.981698: step 2110, loss 0.104648, acc 0.96875 2019-09-25T17:05:01.113617: step 2111, loss 0.0777374, acc 0.953125 2019-09-25T17:05:01.236300: step 2112, loss 0.0418611, acc 1 2019-09-25T17:05:01.359449: step 2113, loss 0.0506932, acc 0.984375 2019-09-25T17:05:01.481709: step 2114, loss 0.0100868, acc 1 2019-09-25T17:05:01.602155: step 2115, loss 0.0419339, acc 0.984375 2019-09-25T17:05:01.722355: step 2116, loss 0.0350051, acc 1 2019-09-25T17:05:01.846101: step 2117, loss 0.0200887, acc 1 2019-09-25T17:05:01.967991: step 2118, loss 0.0332971, acc 1 2019-09-25T17:05:02.101590: step 2119, loss 0.0270156, acc 1 2019-09-25T17:05:02.222532: step 2120, loss 0.0712495, acc 0.984375 2019-09-25T17:05:02.341590: step 2121, loss 0.0526532, acc 0.984375 2019-09-25T17:05:02.465110: step 2122, loss 0.0854502, acc 0.984375 2019-09-25T17:05:02.587000: step 2123, loss 0.0222088, acc 1 2019-09-25T17:05:02.706824: step 2124, loss 0.0382017, acc 0.96875 2019-09-25T17:05:02.825269: step 2125, loss 0.0285708, acc 0.984375 2019-09-25T17:05:02.947625: step 2126, loss 0.0812325, acc 0.96875 
2019-09-25T17:05:03.069963: step 2127, loss 0.0340771, acc 1 2019-09-25T17:05:03.200138: step 2128, loss 0.0307962, acc 0.984375 2019-09-25T17:05:03.321649: step 2129, loss 0.0689, acc 0.96875 2019-09-25T17:05:03.442212: step 2130, loss 0.0410553, acc 1 2019-09-25T17:05:03.564970: step 2131, loss 0.0196328, acc 1 2019-09-25T17:05:03.687260: step 2132, loss 0.0197506, acc 1 2019-09-25T17:05:03.810927: step 2133, loss 0.026133, acc 1 2019-09-25T17:05:03.953774: step 2134, loss 0.0689578, acc 0.96875 2019-09-25T17:05:04.076766: step 2135, loss 0.0317317, acc 1 2019-09-25T17:05:04.209628: step 2136, loss 0.0191067, acc 1 2019-09-25T17:05:04.329631: step 2137, loss 0.0319657, acc 1 2019-09-25T17:05:04.449826: step 2138, loss 0.0763396, acc 0.96875 2019-09-25T17:05:04.569694: step 2139, loss 0.0251709, acc 1 2019-09-25T17:05:04.687766: step 2140, loss 0.0417462, acc 0.984375 2019-09-25T17:05:04.808256: step 2141, loss 0.0548351, acc 0.96875 2019-09-25T17:05:04.927936: step 2142, loss 0.0796225, acc 0.96875 2019-09-25T17:05:05.047362: step 2143, loss 0.0528244, acc 0.984375 2019-09-25T17:05:05.171016: step 2144, loss 0.0721707, acc 0.984375 2019-09-25T17:05:05.301306: step 2145, loss 0.111149, acc 0.9375 2019-09-25T17:05:05.421657: step 2146, loss 0.0378045, acc 0.984375 2019-09-25T17:05:05.541539: step 2147, loss 0.0331855, acc 0.984375 2019-09-25T17:05:05.662135: step 2148, loss 0.0376215, acc 0.984375 2019-09-25T17:05:05.782123: step 2149, loss 0.0707341, acc 0.96875 2019-09-25T17:05:05.902480: step 2150, loss 0.0397726, acc 0.984375 2019-09-25T17:05:06.023239: step 2151, loss 0.0909104, acc 0.96875 2019-09-25T17:05:06.144310: step 2152, loss 0.0470831, acc 0.96875 2019-09-25T17:05:06.271688: step 2153, loss 0.0222202, acc 1 2019-09-25T17:05:06.395392: step 2154, loss 0.071802, acc 0.96875 2019-09-25T17:05:06.516593: step 2155, loss 0.0220214, acc 1 2019-09-25T17:05:06.636283: step 2156, loss 0.0624151, acc 0.96875 2019-09-25T17:05:06.758931: step 2157, loss 0.0739308, acc 0.96875 2019-09-25T17:05:06.880131: step 2158, loss 0.0970819, acc 0.96875 2019-09-25T17:05:07.000200: step 2159, loss 0.0646618, acc 0.953125 2019-09-25T17:05:07.121818: step 2160, loss 0.0185028, acc 1 2019-09-25T17:05:07.241978: step 2161, loss 0.0556141, acc 0.984375 2019-09-25T17:05:07.374464: step 2162, loss 0.0692046, acc 0.96875 2019-09-25T17:05:07.494240: step 2163, loss 0.0456904, acc 0.984375 2019-09-25T17:05:07.613181: step 2164, loss 0.0202299, acc 1 2019-09-25T17:05:07.733406: step 2165, loss 0.0499006, acc 0.96875 2019-09-25T17:05:07.853044: step 2166, loss 0.0251958, acc 1 2019-09-25T17:05:07.971628: step 2167, loss 0.0405966, acc 1 2019-09-25T17:05:08.092160: step 2168, loss 0.0455766, acc 0.984375 2019-09-25T17:05:08.212980: step 2169, loss 0.0518767, acc 0.96875 2019-09-25T17:05:08.339380: step 2170, loss 0.0411761, acc 0.96875 2019-09-25T17:05:08.458966: step 2171, loss 0.064175, acc 0.984375 2019-09-25T17:05:08.576584: step 2172, loss 0.0325525, acc 1 2019-09-25T17:05:08.694657: step 2173, loss 0.057088, acc 0.96875 2019-09-25T17:05:08.813526: step 2174, loss 0.0199393, acc 1 2019-09-25T17:05:08.936989: step 2175, loss 0.0426527, acc 1 2019-09-25T17:05:09.058929: step 2176, loss 0.0827552, acc 0.953125 2019-09-25T17:05:09.177909: step 2177, loss 0.0254691, acc 0.984375 2019-09-25T17:05:09.300944: step 2178, loss 0.126252, acc 0.953125 2019-09-25T17:05:09.436438: step 2179, loss 0.121936, acc 0.96875 2019-09-25T17:05:09.555906: step 2180, loss 0.0898061, acc 0.96875 2019-09-25T17:05:09.676371: step 2181, 
loss 0.0833908, acc 0.984375 2019-09-25T17:05:09.796998: step 2182, loss 0.0873014, acc 0.953125 2019-09-25T17:05:09.916496: step 2183, loss 0.0921776, acc 0.953125 2019-09-25T17:05:10.038856: step 2184, loss 0.0449402, acc 0.984375 2019-09-25T17:05:10.159273: step 2185, loss 0.0246301, acc 1 2019-09-25T17:05:10.279930: step 2186, loss 0.0375082, acc 1 2019-09-25T17:05:10.404946: step 2187, loss 0.0600741, acc 0.984375 2019-09-25T17:05:10.525644: step 2188, loss 0.0311001, acc 1 2019-09-25T17:05:10.645701: step 2189, loss 0.010758, acc 1 2019-09-25T17:05:10.766384: step 2190, loss 0.0325381, acc 0.984375 2019-09-25T17:05:10.887301: step 2191, loss 0.0310055, acc 0.984375 2019-09-25T17:05:11.007658: step 2192, loss 0.0373815, acc 0.984375 2019-09-25T17:05:11.127071: step 2193, loss 0.0537261, acc 0.984375 2019-09-25T17:05:11.248323: step 2194, loss 0.0639518, acc 0.96875 2019-09-25T17:05:11.371056: step 2195, loss 0.11299, acc 0.96875 2019-09-25T17:05:11.501300: step 2196, loss 0.021571, acc 1 2019-09-25T17:05:11.619941: step 2197, loss 0.0386563, acc 0.984375 2019-09-25T17:05:11.740638: step 2198, loss 0.0281227, acc 1 2019-09-25T17:05:11.869563: step 2199, loss 0.0247732, acc 1 2019-09-25T17:05:11.991991: step 2200, loss 0.029651, acc 0.984375 Evaluation: 2019-09-25T17:05:12.195992: step 2200, loss 0.814987, acc 0.742026 Saved model checkpoint to /kaggle/working/runs/1569430829/checkpoints/model-2200 2019-09-25T17:05:12.466395: step 2201, loss 0.0625801, acc 0.96875 2019-09-25T17:05:12.588923: step 2202, loss 0.104464, acc 0.9375 2019-09-25T17:05:12.710239: step 2203, loss 0.0615943, acc 0.96875 2019-09-25T17:05:12.832059: step 2204, loss 0.0124766, acc 1 2019-09-25T17:05:12.954992: step 2205, loss 0.0261898, acc 0.984375 2019-09-25T17:05:13.078661: step 2206, loss 0.053574, acc 0.984375 2019-09-25T17:05:13.200218: step 2207, loss 0.0803879, acc 0.984375 2019-09-25T17:05:13.324345: step 2208, loss 0.0162955, acc 1 2019-09-25T17:05:13.445937: step 2209, loss 0.0488491, acc 0.984375 2019-09-25T17:05:13.581323: step 2210, loss 0.0529769, acc 0.953125 2019-09-25T17:05:13.701609: step 2211, loss 0.0243921, acc 1 2019-09-25T17:05:13.823563: step 2212, loss 0.0287717, acc 1 2019-09-25T17:05:13.971882: step 2213, loss 0.0374461, acc 1 2019-09-25T17:05:14.093318: step 2214, loss 0.0155318, acc 1 2019-09-25T17:05:14.215312: step 2215, loss 0.0511121, acc 0.984375 2019-09-25T17:05:14.336408: step 2216, loss 0.0229652, acc 0.984375 2019-09-25T17:05:14.458339: step 2217, loss 0.0369249, acc 0.984375 2019-09-25T17:05:14.592655: step 2218, loss 0.0816827, acc 0.96875 2019-09-25T17:05:14.714077: step 2219, loss 0.0835757, acc 0.96875 2019-09-25T17:05:14.834832: step 2220, loss 0.0332484, acc 0.984375 2019-09-25T17:05:14.953303: step 2221, loss 0.0297884, acc 1 2019-09-25T17:05:15.074201: step 2222, loss 0.0416463, acc 1 2019-09-25T17:05:15.194396: step 2223, loss 0.0506008, acc 1 2019-09-25T17:05:15.315922: step 2224, loss 0.0508721, acc 0.984375 2019-09-25T17:05:15.440133: step 2225, loss 0.0310283, acc 1 2019-09-25T17:05:15.561046: step 2226, loss 0.0583289, acc 0.96875 2019-09-25T17:05:15.686966: step 2227, loss 0.0609555, acc 0.984375 2019-09-25T17:05:15.809306: step 2228, loss 0.0492584, acc 0.984375 2019-09-25T17:05:15.929631: step 2229, loss 0.0571979, acc 0.984375 2019-09-25T17:05:16.049912: step 2230, loss 0.0389712, acc 0.984375 2019-09-25T17:05:16.171483: step 2231, loss 0.0788497, acc 0.96875 2019-09-25T17:05:16.295141: step 2232, loss 0.044255, acc 0.984375 2019-09-25T17:05:16.416731: step 
2233, loss 0.0688348, acc 0.984375 2019-09-25T17:05:16.536227: step 2234, loss 0.0461735, acc 0.984375 2019-09-25T17:05:16.666134: step 2235, loss 0.106788, acc 0.984375 2019-09-25T17:05:16.787678: step 2236, loss 0.0126432, acc 1 2019-09-25T17:05:16.908687: step 2237, loss 0.0359773, acc 0.984375 2019-09-25T17:05:17.030609: step 2238, loss 0.0576602, acc 0.96875 2019-09-25T17:05:17.153537: step 2239, loss 0.014365, acc 1 2019-09-25T17:05:17.276347: step 2240, loss 0.0589547, acc 0.96875 2019-09-25T17:05:17.401860: step 2241, loss 0.0203834, acc 1 2019-09-25T17:05:17.522423: step 2242, loss 0.0250305, acc 1 2019-09-25T17:05:17.644930: step 2243, loss 0.0433754, acc 0.984375 2019-09-25T17:05:17.772623: step 2244, loss 0.0519589, acc 0.96875 2019-09-25T17:05:17.893315: step 2245, loss 0.0345871, acc 0.984375 2019-09-25T17:05:18.012137: step 2246, loss 0.0689895, acc 0.96875 2019-09-25T17:05:18.133669: step 2247, loss 0.0443852, acc 0.984375 2019-09-25T17:05:18.253834: step 2248, loss 0.0907534, acc 0.96875 2019-09-25T17:05:18.376569: step 2249, loss 0.0296122, acc 1 2019-09-25T17:05:18.491345: step 2250, loss 0.0653899, acc 0.966667
An exception has occurred, use %tb to see the full traceback.
SystemExit
/opt/conda/lib/python3.6/site-packages/IPython/core/interactiveshell.py:3334: UserWarning: To exit: use 'exit', 'quit', or Ctrl-D. warn("To exit: use 'exit', 'quit', or Ctrl-D.", stacklevel=1)
The SystemExit here is harmless: it is what sys.exit() raises when the training script finishes, and IPython traps it instead of shutting down the kernel; training completed and the checkpoints above were saved. Note also that the dev loss rose steadily (0.624 at step 1400 to 0.815 at step 2200) while training accuracy approached 1.0 and dev accuracy plateaued near 0.74, a clear sign of overfitting; early stopping on dev loss would be reasonable.
for dirname, _, filenames in os.walk('.'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
./__output__.json
./__notebook__.ipynb
./runs/1569430829/vocab
./runs/1569430829/summaries/dev/events.out.tfevents.1569430831.595f7815e0fd
./runs/1569430829/summaries/train/events.out.tfevents.1569430831.595f7815e0fd
./runs/1569430829/checkpoints/model-2200.index
./runs/1569430829/checkpoints/model-2000.index
./runs/1569430829/checkpoints/checkpoint
./runs/1569430829/checkpoints/model-2100.data-00000-of-00001
./runs/1569430829/checkpoints/model-1800.index
./runs/1569430829/checkpoints/model-1900.index
./runs/1569430829/checkpoints/model-2100.index
./runs/1569430829/checkpoints/model-2000.data-00000-of-00001
./runs/1569430829/checkpoints/model-2200.meta
./runs/1569430829/checkpoints/model-1800.meta
./runs/1569430829/checkpoints/model-2200.data-00000-of-00001
./runs/1569430829/checkpoints/model-2100.meta
./runs/1569430829/checkpoints/model-1800.data-00000-of-00001
./runs/1569430829/checkpoints/model-1900.data-00000-of-00001
./runs/1569430829/checkpoints/model-1900.meta
./runs/1569430829/checkpoints/model-2000.meta
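Each model-N checkpoint in the listing is the standard TF1 Saver triplet: a .meta file holding the serialized graph, plus .index and .data-00000-of-00001 files holding the variable values; the small checkpoint file records which prefixes are retained. As a quick sketch (not part of the notebook's pipeline; the run id is the one created above), tf.train.get_checkpoint_state reads that bookkeeping file:
import tensorflow as tf
# Read the `checkpoint` bookkeeping file written by tf.train.Saver.
state = tf.train.get_checkpoint_state("./runs/1569430829/checkpoints/")
print(state.model_checkpoint_path)       # newest prefix, e.g. .../model-2200
print(state.all_model_checkpoint_paths)  # all retained checkpoint prefixes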
You can run the evaluation directly on the whole dataset, or you can split off a piece of the training data to use as a test set, as sketched below.
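A minimal sketch of the held-out split (modeled on the usual shuffle-and-slice pattern; the 10% fraction, the fixed seed, and the variable names are assumptions, not values from this notebook):
import numpy as np
# Load the full corpus with the helper defined earlier in the notebook.
x_text, y = load_data_and_labels(
    "/kaggle/input/textclassification/rt-polarity.pos",
    "/kaggle/input/textclassification/rt-polarity.neg")
# Shuffle once with a fixed seed so the split is reproducible.
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = [x_text[i] for i in shuffle_indices]
y_shuffled = y[shuffle_indices]
# Hold out the last 10% of the shuffled examples as a test set.
test_sample_index = -1 * int(0.1 * float(len(y)))
x_train, x_test_raw = x_shuffled[:test_sample_index], x_shuffled[test_sample_index:]
y_train, y_test = y_shuffled[:test_sample_index], y_shuffled[test_sample_index:]
# x_test_raw / y_test could then be fed through the vocab processor and the eval loop below.
The script below takes the first route instead: with eval_train = True it reloads the full dataset and scores every example.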
#! /usr/bin/env python
import tensorflow as tf
import numpy as np
import os
import time
import datetime
from tensorflow.contrib import learn
import csv

# Parameters
# ==================================================

# Eval Parameters
# `timestamp` was set in the training cell above, so this points at the run just trained.
tf.flags.DEFINE_string("checkpoint_dir", "./runs/" + timestamp + "/checkpoints/", "Checkpoint directory from training run")
tf.flags.DEFINE_boolean("eval_train", True, "Evaluate on all training data")

FLAGS = tf.flags.FLAGS
# FLAGS._parse_flags()
# print("\nParameters:")
# for attr, value in sorted(FLAGS.__flags.items()):
#     print("{}={}".format(attr.upper(), value))
# print("")

# CHANGE THIS: Load data. Load your own data here
if FLAGS.eval_train:
    # The data-file, batch-size and device-placement flags were defined in the training cell.
    x_raw, y_test = load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)
    y_test = np.argmax(y_test, axis=1)
else:
    x_raw = ["a masterpiece four years in the making", "everything is off."]
    y_test = [1, 0]

# Map data into vocabulary
vocab_path = os.path.join(FLAGS.checkpoint_dir, "..", "vocab")
vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)
x_test = np.array(list(vocab_processor.transform(x_raw)))

print("\nEvaluating...\n")

# Evaluation
# ==================================================
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
graph = tf.Graph()
with graph.as_default():
    session_conf = tf.ConfigProto(
        allow_soft_placement=FLAGS.allow_soft_placement,
        log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        # Load the saved meta graph and restore variables
        saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
        saver.restore(sess, checkpoint_file)

        # Get the placeholders from the graph by name
        input_x = graph.get_operation_by_name("input_x").outputs[0]
        # input_y = graph.get_operation_by_name("input_y").outputs[0]
        dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]

        # Tensors we want to evaluate
        predictions = graph.get_operation_by_name("output/predictions").outputs[0]

        # Generate batches for one epoch
        batches = batch_iter(list(x_test), FLAGS.batch_size, 1, shuffle=False)

        # Collect the predictions here
        all_predictions = []
        for x_test_batch in batches:
            batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})
            all_predictions = np.concatenate([all_predictions, batch_predictions])

# Print accuracy if y_test is defined
if y_test is not None:
    correct_predictions = float(sum(all_predictions == y_test))
    print("Total number of test examples: {}".format(len(y_test)))
    print("Accuracy: {:g}".format(correct_predictions / float(len(y_test))))

# Save the evaluation to a csv
predictions_human_readable = np.column_stack((np.array(x_raw), all_predictions))
out_path = os.path.join(FLAGS.checkpoint_dir, "..", "prediction.csv")
print("Saving evaluation to {0}".format(out_path))
with open(out_path, 'w') as f:
    csv.writer(f).writerows(predictions_human_readable)
W0925 17:05:20.139110 139759124096384 deprecation.py:323] From /opt/conda/lib/python3.6/site-packages/tensorflow/python/training/saver.py:1276: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.
Instructions for updating: Use standard file APIs to check for files with this prefix.
I0925 17:05:20.142160 139759124096384 saver.py:1280] Restoring parameters from /kaggle/working/runs/1569430829/checkpoints/model-2200
Evaluating...

Total number of test examples: 10662
Accuracy: 0.974207
Saving evaluation to ./runs/1569430829/checkpoints/../prediction.csv
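Keep in mind that this 0.974 accuracy is measured on the full dataset, nearly all of which the model was trained on; the held-out dev accuracy above peaked around 0.74, which is the more honest estimate of generalization. To sanity-check individual outputs, a short sketch (assuming the prediction.csv layout written above: cleaned sentence in column 0, predicted label written as a float string in column 1) can read a few rows back:
import csv
# Peek at the first few saved predictions; labels are "0.0" (negative) or "1.0" (positive).
with open("./runs/1569430829/checkpoints/../prediction.csv") as f:
    for sentence, label in list(csv.reader(f))[:5]:
        print("{}\t{}".format("positive" if float(label) == 1.0 else "negative", sentence))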