python - TensorBoard not correctly logging precision and recall metrics


I am using TensorBoard to log cross-entropy and accuracy successfully, but the precision and recall graphs come out wrong when logged with tf.metrics.recall and tf.metrics.precision.

My problem is a 6-class classification problem. I know from manual calculations outside of TensorBoard that precision should be ~99% at 80% recall, but the graphs recorded in TensorBoard show a flat 16% precision at 100% recall.
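Roughly, the kind of threshold-based check I mean looks like this (a sketch; the helper names and the thresholding scheme here are my own illustration, not part of the training code):

    import numpy as np

    def softmax(z):
        e = np.exp(z - z.max(axis=1, keepdims=True))
        return e / e.sum(axis=1, keepdims=True)

    def precision_recall_at_threshold(labels_onehot, logits, threshold=0.9):
        # Commit to a prediction only when the top softmax probability clears
        # the threshold; precision is over committed samples, recall over all.
        probs = softmax(logits)
        pred_cls = probs.argmax(axis=1)
        true_cls = labels_onehot.argmax(axis=1)
        committed = probs.max(axis=1) >= threshold
        correct = (pred_cls == true_cls) & committed
        precision = correct.sum() / max(committed.sum(), 1)
        recall = correct.sum() / len(true_cls)
        return precision, recall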

The precision stat logged is curiously close to 1/6th, which corresponds to a random selection from the 6 output nodes.
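What makes me suspicious: if tf.metrics.precision casts its labels and predictions to bool (as the TF 1.x docs suggest), then feeding one-hot labels against raw logits would mark nearly every logit as a positive prediction, and that reproduces exactly these numbers in numpy (a sketch, my own illustration):

    import numpy as np

    rng = np.random.default_rng(0)
    n, classes = 1000, 6
    labels = np.eye(classes)[rng.integers(0, classes, n)]  # one-hot, shape (n, 6)
    logits = rng.normal(size=(n, classes))                 # raw scores, rarely exactly 0

    label_bool = labels != 0  # True only at the correct class
    pred_bool = logits != 0   # a bool cast would see almost everything as True

    tp = np.sum(pred_bool & label_bool)
    print(tp / pred_bool.sum())   # precision ~ 1/6 ~ 16%
    print(tp / label_bool.sum())  # recall = 100%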

The code is below:

    def classifier_graph(x, y, learning_rate=0.1):
        """
        Build the graph for classification, given our input layer x and
        output layer, y.
        """
        with tf.name_scope('classifier'):
            with tf.name_scope('model'):
                w = tf.Variable(tf.zeros([xdim, ydim]), name='w')
                b = tf.Variable(tf.zeros([ydim]), name='b')
                y_ = tf.matmul(x, w) + b

            with tf.name_scope('cross_entropy'):
                diff = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=y_)
                cross_entropy = tf.reduce_mean(diff)
                summary = tf.summary.scalar('cross_entropy', cross_entropy)

            with tf.name_scope('train'):
                #cross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(y_), reduction_indices=[1]), name='cross_en$
                train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
                # minimise cross_entropy via gradient descent

            with tf.name_scope('accuracy'):
                with tf.name_scope('correct_prediction'):
                    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
                with tf.name_scope('accuracy'):
                    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
                    tf.summary.scalar('accuracy', accuracy)

            with tf.name_scope('metrics'):
                _, recall = tf.metrics.recall(y, y_)
                _, precision = tf.metrics.precision(y, y_)

                v_rec = tf.summary.scalar('recall', recall)
                v_prec = tf.summary.scalar('precision', precision)

        metrics = tf.summary.merge_all()

        return [w, b, y_, cross_entropy, train_step, metrics]


    def train_classifier(insamples, outsamples, batch_size, iterations, feature_set_index=1, model=None, device="/gpu:0"):

        x = tf.placeholder(tf.float32, [None, xdim], name='x')  # None indicates an arbitrary first dimension
        y = tf.placeholder(tf.float32, [None, ydim], name='y')

        w, b, y_, cross_entropy, train_step, metrics = classifier_graph(x, y)

        with tf.Session(config=config) as sess, tf.device(device):

            init = tf.global_variables_initializer()
            init_l = tf.local_variables_initializer()

            sess.run(init)
            sess.run(init_l)

            file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())

            all_classifier_results, all_models, all_err, all_recall, all_precision = [], [], [], [], []

            t = 0
            while t < iterations:
                batch_x, batch_y = batch_feed(insamples, batch_size, feature_set_index)
                t += 1
                _, err, metrics_str = sess.run([train_step, cross_entropy, metrics], feed_dict={x: batch_x, y: batch_y})

                all_err.append(err)

                file_writer.add_summary(metrics_str, t)

        return 'done'
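In case it is relevant, this is the alternative wiring of the metrics scope I am considering (my assumption, not verified: these metrics are binary, so they want per-class True/False tensors rather than one-hot labels against logits):

    with tf.name_scope('metrics'):
        # Compare class ids per class, so each metric sees a boolean tensor.
        y_cls = tf.argmax(y, 1)
        pred_cls = tf.argmax(y_, 1)
        for c in range(6):
            _, prec_c = tf.metrics.precision(labels=tf.equal(y_cls, c),
                                             predictions=tf.equal(pred_cls, c))
            _, rec_c = tf.metrics.recall(labels=tf.equal(y_cls, c),
                                         predictions=tf.equal(pred_cls, c))
            tf.summary.scalar('precision_%d' % c, prec_c)
            tf.summary.scalar('recall_%d' % c, rec_c)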

Any ideas on why this might be? Thanks. x, y and y_ are numpy arrays.

