Visualizing the weights of a trained TensorFlow model

Time: 2018-01-07 02:38:23

Tags: python tensorflow neural-network conv-neural-network

I trained a model in TensorFlow and I want to visualize its weights. Below is the code I used to train the model. It looks like a lot of code, but it is quite straightforward: I just build the network, define the placeholders, and train it with the optimize() function.

import time
from datetime import timedelta

import numpy as np
import tensorflow as tf

# Create the neural network
def conv_net(x, reuse=tf.AUTO_REUSE):

    # Define a scope for reusing the variables
    with tf.variable_scope('ConvNet', reuse=reuse):

        # Reshape to match picture format [Height x Width x Channel]
        # Tensor input becomes 4-D: [Batch Size, Height, Width, Channel]
        #x = tf.reshape(x, shape=[-1, height, width, channels])

        # -- FIRST BLOCK --
        # Two convolutional layers with 32 filters of size 5, padding='SAME',
        # one 2x2 max-pool layer and one dropout layer.
        # Dropout layers take the `training` placeholder so they are only
        # active during training.
        conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu, padding='SAME', name='conv1')
        conv1 = tf.layers.batch_normalization(conv1, training=training)
        conv2 = tf.layers.conv2d(conv1, 32, 5, activation=tf.nn.relu, padding='SAME', name='conv2')
        conv2 = tf.layers.batch_normalization(conv2, training=training)
        conv2 = tf.layers.max_pooling2d(conv2, 2, 2)
        conv2 = tf.layers.dropout(conv2, rate=dropout, training=training)

        # -- SECOND BLOCK --
        # Two convolutional layers with 32 filters of size 3, padding='SAME',
        # one 2x2 max-pool layer and one dropout layer with a 1.25x dropout rate
        conv3 = tf.layers.conv2d(conv2, 32, 3, activation=tf.nn.relu, padding='SAME', name='conv3')
        conv3 = tf.layers.batch_normalization(conv3, training=training)
        conv4 = tf.layers.conv2d(conv3, 32, 3, activation=tf.nn.relu, padding='SAME', name='conv4')
        conv4 = tf.layers.batch_normalization(conv4, training=training)
        conv4 = tf.layers.max_pooling2d(conv4, 2, 2)
        conv4 = tf.layers.dropout(conv4, rate=dropout*1.25, training=training)

        # Flatten the data to a 1-D vector for the fully connected layers
        fc1 = tf.contrib.layers.flatten(conv4)

        # -- THIRD BLOCK --
        # Two fully connected layers with 1024 neurons,
        # 2x dropout rate on each layer
        fc1 = tf.layers.dense(fc1, 1024)
        fc1 = tf.layers.batch_normalization(fc1, training=training)
        fc1 = tf.layers.dropout(fc1, rate=dropout*2, training=training)
        fc2 = tf.layers.dense(fc1, 1024)
        fc2 = tf.layers.batch_normalization(fc2, training=training)
        fc2 = tf.layers.dropout(fc2, rate=dropout*2, training=training)

        # Output layer
        out = tf.layers.dense(fc2, num_classes)

    return out

#### Misc stuff to run the training part ####
# (images_placeholder, labels_placeholder, the `training` placeholder,
#  dropout, learning_rate and batch_size are defined elsewhere)

# Predicted classes
logits_out = conv_net(images_placeholder, reuse=tf.AUTO_REUSE)

y_pred = tf.nn.softmax(logits_out, name='y_pred')
y_pred_cls = tf.argmax(y_pred, axis=1)

y_true_cls = tf.argmax(labels_placeholder, axis=1)

cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits_out, labels=labels_placeholder)
cost = tf.reduce_mean(cross_entropy)

optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

#Optimizer
def optimize(epochs):

  start_time = time.time()

  # The UPDATE_OPS collection holds the batch-norm moving-average updates;
  # fetch it once here rather than on every iteration
  batch_norm_update = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

  for i in range(epochs):

    indices = np.random.choice(trainX.shape[0], batch_size)
    x_batch, y_true_batch = trainX[indices], trainY[indices]

    session.run([optimizer, batch_norm_update], feed_dict={images_placeholder: x_batch,
                                                           labels_placeholder: y_true_batch,
                                                           training: True})

    saver.save(session, 'C:\\Users\\ggiorcelli\\Tensorflow Workbooks\\animals_model')


    if (i+1) % 10 == 0:
        # Evaluate in inference mode (training=False) so that batch norm
        # uses its moving averages and dropout is disabled
        train_accuracy = session.run(accuracy, feed_dict={images_placeholder: x_batch,
                                                          labels_placeholder: y_true_batch,
                                                          training: False})

        indices_test = np.random.choice(testX.shape[0], batch_size)
        images_test = testX[indices_test]
        labels_test = testY[indices_test]

        test_accuracy = session.run(accuracy, feed_dict={images_placeholder: images_test,
                                                         labels_placeholder: labels_test,
                                                         training: False})

        print('Step {:3d}: training accuracy = {:0.3f}, test accuracy = {:0.3f}'.format(i+1, train_accuracy, test_accuracy))

  end_time = time.time()
  time_dif = end_time - start_time

  print("Time usage: " + str(timedelta(seconds=int(round(time_dif)))))

#Train
saver = tf.train.Saver()

with tf.Session() as session:
  session.run(tf.global_variables_initializer())
  optimize(epochs=200)

With this code I was able to train the model successfully. Now I want to get the weights of the convolutional layers. For that I use this snippet:

with tf.variable_scope('conv1', reuse=True):
  w1 = tf.get_variable('weights')
  x_image = tf.reshape(w1, [5, 5, 1, 32])
  img = tf.summary.image('img',x_image)

However, with the code above I only get an image summary object that I cannot visualize. I am not even sure this is the right way to get the weights. I have seen others ask this question before, but most answers point to TensorBoard, and I want to do this in an IPython shell instead.
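
For reference, a minimal sketch of the direction this could take, assuming the graph and checkpoint from the training code above: tf.layers.conv2d names its weight variable kernel (not weights) under the enclosing ConvNet scope, and tf.summary.image only returns a serialized summary protobuf meant for TensorBoard, so fetching the variable with session.run yields a plain NumPy array instead:

# Sketch: fetch the conv1 kernels as a NumPy array.
# To list the exact variable names in the graph:
#   print([v.name for v in tf.trainable_variables()])
with tf.variable_scope('ConvNet', reuse=True):
  w1 = tf.get_variable('conv1/kernel')   # shape: [5, 5, in_channels, 32]

with tf.Session() as session:
  saver.restore(session, 'C:\\Users\\ggiorcelli\\Tensorflow Workbooks\\animals_model')
  kernels = session.run(w1)              # plain NumPy array, ready to plot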

Below is an example of what I would like to see.

[Image: Conv Layer Weights 1]

[Image: Conv Layer Weights 2]
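
A minimal matplotlib sketch, assuming the kernels array from the snippet above, could render a similar grid; the 4x8 layout and grayscale colormap here are illustrative choices, plotting the first input channel of each of the 32 filters:

import matplotlib.pyplot as plt

fig, axes = plt.subplots(4, 8, figsize=(12, 6))
for i, ax in enumerate(axes.flat):
  ax.imshow(kernels[:, :, 0, i], cmap='gray')   # first input channel of filter i
  ax.set_title(str(i), fontsize=8)
  ax.axis('off')
plt.show()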

0 Answers