Foolbox predicts the wrong class for an image (the model is 1.0 accurate)

Time: 2019-06-28 08:42:42

Tags: python tensorflow

I have a pre-trained model in TensorFlow (a fully trained, ImageNet-pretrained model) with the resnet_50 network architecture. Now I am using Foolbox to get predictions from it.

However, the model returns the wrong prediction on the original image (the model is 1.0 accurate).

Can you help me figure out what is going wrong? Here is the script that restores the checkpoint and queries Foolbox:

import tensorflow as tf
import resnet50ORL
import numpy as np
import foolbox
import scipy.io as sio
def convert_to_one_hot(Y, C):
    Y = np.eye(C)[Y.reshape(-1)].T
    return Y
DATA = sio.loadmat('/home/vision/Data/DSCN_data/ORL_32x32.mat')
X_train = DATA['fea'].reshape(400, 32, 32, 1)/255
x1 = X_train[0, :, :, :]  #.reshape(32, 32, 1)
Y_train = DATA['gnd']
Y_train = convert_to_one_hot(Y_train-1, 40).T
m, H_size, W_size, C_size = X_train.shape
classes = 40
X = tf.placeholder(tf.float32, shape=(None, H_size, W_size, C_size), name='X')
Y = tf.placeholder(tf.float32, shape=(None, classes), name='Y')
logits = resnet50ORL.ResNet50_reference(X, classes)
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, axis=1), tf.argmax(Y, axis=1)), tf.float32))
saver = tf.train.Saver(tf.trainable_variables())
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    saver.restore(sess, '/home/vision/Data/checkpoint/ORL0/ORL.ckpt-541')
    model = foolbox.models.TensorFlowModel(X, logits, (0, 1), channel_axis=1)
    print(np.argmax(model.predictions(x1)))
    acc, bbb, ccc = sess.run([accuracy, logits, Y], feed_dict={X: X_train, Y: Y_train})
    print(acc)
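
For reference, one quick way to narrow down where the mismatch comes from is to run the same single image through both paths inside the same session. This is only a diagnostic sketch, assuming the Foolbox version used above, where model.predictions() accepts one unbatched image while the placeholder X expects a leading batch dimension:

    # diagnostic sketch (added inside the with tf.Session() block above, after model is built)
    fb_logits = model.predictions(x1)                        # Foolbox handles the batch axis itself
    tf_logits = sess.run(logits, feed_dict={X: x1[np.newaxis, ...]})[0]
    print('foolbox argmax:', np.argmax(fb_logits))
    print('session argmax:', np.argmax(tf_logits))

If the two argmax values already disagree on the same image, the problem lies in how Foolbox feeds the graph rather than in the restored weights.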

Below is resnet50ORL.py, the network definition together with the training script that produced the checkpoint:

import tensorflow as tf
import scipy.io as sio
import numpy as np
import math
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
TRAINING = tf.Variable(initial_value=True, dtype=tf.bool, trainable=True)  # global flag that switches the batch-normalization layers between training and inference mode
def identity_block(X_input, kernel_size, filters, stage, block):
    """
    Implementation of the identity block as defined in Figure 3
    Arguments:
    X_input -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    kernel_size -- integer, specifying the shape of the middle CONV's window for the main path
    filters -- python list of integers, defining the number of filters in the CONV layers of the main path
    stage -- integer, used to name the layers, depending on their position in the network
    block -- string/character, used to name the layers, depending on their position in the network
    Returns:
    X -- output of the identity block, tensor of shape (n_H, n_W, n_C)
    """
    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    with tf.name_scope("id_block_stage"+str(stage)):
        filter1, filter2, filter3 = filters
        X_shortcut = X_input
        # First component of main path
        x = tf.layers.conv2d(X_input, filter1, kernel_size=(1, 1), strides=(1, 1), name=conv_name_base+'2a')
        x = tf.layers.batch_normalization(x, axis=3, name=bn_name_base+'2a', training=TRAINING)
        x = tf.nn.relu(x)
        # Second component of main path
        x = tf.layers.conv2d(x, filter2, (kernel_size, kernel_size), padding='same', name=conv_name_base+'2b')
        # batch_norm2 = tf.layers.batch_normalization(conv2, axis=3, name=bn_name_base+'2b', training=TRAINING)
        x = tf.nn.relu(x)
        # Third component of main path
        x = tf.layers.conv2d(x, filter3, kernel_size=(1, 1), name=conv_name_base+'2c')
        x = tf.layers.batch_normalization(x, axis=3, name=bn_name_base + '2c', training=TRAINING)
        # Final step: Add shortcut value to main path, and pass it through a RELU activation
        X_add_shortcut = tf.add(x, X_shortcut)
        add_result = tf.nn.relu(X_add_shortcut)
    return add_result
def convolutional_block(X_input, kernel_size, filters, stage, block, stride = 2):
    """
    Implementation of the convolutional block as defined in Figure 4
    Arguments:
    X_input -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    kernel_size -- integer, specifying the shape of the middle CONV's window for the main path
    filters -- python list of integers, defining the number of filters in the CONV layers of the main path
    stage -- integer, used to name the layers, depending on their position in the network
    block -- string/character, used to name the layers, depending on their position in the network
    stride -- Integer, specifying the stride to be used
    Returns:
    X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C)
    """
    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    with tf.name_scope("conv_block_stage" + str(stage)):
        # Retrieve Filters
        filter1, filter2, filter3 = filters
        # Save the input value
        X_shortcut = X_input
        # First component of main path
        x = tf.layers.conv2d(X_input, filter1, kernel_size=(1, 1), strides=(stride, stride), name=conv_name_base+'2a')
        x = tf.layers.batch_normalization(x, axis=3, name=bn_name_base+'2a', training=TRAINING)
        x = tf.nn.relu(x)
        # Second component of main path
        x = tf.layers.conv2d(x, filter2, (kernel_size, kernel_size), padding='same', name=conv_name_base + '2b')
        x = tf.layers.batch_normalization(x, axis=3, name=bn_name_base + '2b', training=TRAINING)
        x = tf.nn.relu(x)
        # Third component of main path
        x = tf.layers.conv2d(x, filter3, (1, 1), name=conv_name_base + '2c')
        x = tf.layers.batch_normalization(x, axis=3, name=bn_name_base + '2c', training=TRAINING)
        # SHORTCUT PATH
        X_shortcut = tf.layers.conv2d(X_shortcut, filter3, (1, 1), strides=(stride, stride), name=conv_name_base + '1')
        X_shortcut = tf.layers.batch_normalization(X_shortcut, axis=3, name=bn_name_base + '1', training=TRAINING)
        # Final step: Add shortcut value to main path, and pass it through a RELU activation
        X_add_shortcut = tf.add(X_shortcut, x)
        add_result = tf.nn.relu(X_add_shortcut)
    return add_result
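# Shape example (illustrative): with the default 'valid' padding of tf.layers.conv2d,
# an input of shape (m, 16, 16, 256) passed through
# convolutional_block(..., filters=[128, 128, 512], stride=2) comes out as (m, 8, 8, 512):
# the stride-2 1x1 convolutions halve H and W on both the main path and the shortcut,
# and the final 1x1 convolution (filter3) sets the channel count to 512.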
def ResNet50_reference(X, classes=6):
    """
    Implementation of the popular ResNet50 with the following architecture:
    CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3
    -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER
    Arguments:
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    classes -- integer, number of classes (not used below; the final dense layer is hard-coded to 40 units)
    Returns:
    logits -- output of the final dense layer, tensor of shape (m, 40)
    """
    x = tf.pad(X, tf.constant([[0, 0], [3, 3], [3, 3], [0, 0]]), "CONSTANT")
    # stage 1
    x = tf.layers.conv2d(x, filters=64, kernel_size=(3, 3), strides=(1, 1), padding='same', name='conv1')
    x = tf.layers.batch_normalization(x, axis=3, name='bn_conv1')
    x = tf.nn.relu(x)
    # x = tf.layers.max_pooling2d(x, pool_size=(3, 3), strides=(2, 2))
    # stage 2
    x = convolutional_block(x, kernel_size=3, filters=[64, 64, 256], stage=2, block='a', stride=1)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
    # stage 3
    x = convolutional_block(x, kernel_size=3, filters=[128, 128, 512], stage=3, block='a', stride=2)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
    # stage 4
    x = convolutional_block(x, kernel_size=3, filters=[256, 256, 1024], stage=4, block='a', stride=2)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
    # stage 5
    x = convolutional_block(x, kernel_size=3, filters=[512, 512, 2048], stage=5, block='a', stride=2)
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
    x = tf.layers.average_pooling2d(x, pool_size=(2, 2), strides=(1, 1))
    flatten = tf.layers.flatten(x, name='flatten')
    logits = tf.layers.dense(flatten, units=40)  # output layer hard-coded to 40 classes (the ORL set has 40 subjects)
    # dense1 = tf.layers.dense(flatten, units=50, activation=tf.nn.relu)
    # logits = tf.layers.dense(dense1, units=10, activation=tf.nn.softmax)
    return logits
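# Shape trace for the 32x32x1 ORL input used below (assuming the default 'valid'
# padding of tf.layers.conv2d and tf.layers.average_pooling2d):
#   pad           -> (m, 38, 38, 1)
#   stage 1       -> (m, 38, 38, 64)    (3x3 conv, padding='same', max-pool commented out)
#   stage 2       -> (m, 38, 38, 256)   (stride 1)
#   stage 3       -> (m, 19, 19, 512)   (stride 2)
#   stage 4       -> (m, 10, 10, 1024)  (stride 2)
#   stage 5       -> (m, 5, 5, 2048)    (stride 2)
#   avg pool 2x2  -> (m, 4, 4, 2048), flattened to (m, 32768), dense to (m, 40)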
def random_mini_batches(X, Y, mini_batch_size=64, seed=None):
    """
    Creates a list of random minibatches from (X, Y)

    Arguments:
    X -- input data, of shape (m, Hi, Wi, Ci)
    Y -- true label matrix, of shape (m, n_y)
    mini_batch_size -- size of the mini-batches, integer
    seed -- optional random seed, so that the shuffled mini-batches are reproducible
    Returns:
    mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
    """
    m = X.shape[0]  # number of training examples
    mini_batches = []
    np.random.seed(seed)
    # Step 1: Shuffle (X, Y)
    permutation = list(np.random.permutation(m))
    shuffled_X = X[permutation, :, :, :]
    shuffled_Y = Y[permutation, :]
    # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
    num_complete_minibatches = math.floor(
        m / mini_batch_size)  # number of mini-batches of size mini_batch_size in your partitioning
    for k in range(0, num_complete_minibatches):
        mini_batch_X = shuffled_X[k * mini_batch_size: k * mini_batch_size + mini_batch_size, :, :, :]
        mini_batch_Y = shuffled_Y[k * mini_batch_size: k * mini_batch_size + mini_batch_size, :]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)
    # Handling the end case (last mini-batch < mini_batch_size)
    if m % mini_batch_size != 0:
        mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size: m, :, :, :]
        mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size: m, :]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)
    return mini_batches
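# Example: with m = 400 samples and mini_batch_size = 20 (the values used in main()),
# this returns a list of 20 mini-batches of 20 samples each, with no leftover batch.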
def convert_to_one_hot(Y, C):
    Y = np.eye(C)[Y.reshape(-1)].T
    return Y
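# Worked example: convert_to_one_hot(np.array([0, 2]), 3) returns
#   [[1., 0.],
#    [0., 0.],
#    [0., 1.]]        # shape (C, m) = (3, 2)
# The callers apply .T afterwards, giving label matrices of shape (m, C).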
def main():
    global TRAINING
    DATA = sio.loadmat('/home/vision/Data/DSCN_data/ORL_32x32.mat')
    X_train = DATA['fea'].reshape(400, 32, 32, 1)/255
    Y_train = DATA['gnd']
    Y_train = convert_to_one_hot(Y_train-1, 40).T
    m, H_size, W_size, C_size = X_train.shape
    classes = 40
    mini_batch_size = 20
    max_step = 20000
    best_acc = 0
    global_step = tf.Variable(0, trainable=False)
    model_name = 'ORL'
    restore_path = '/home/vision/Data/checkpoint/' + model_name
    if not os.path.isdir(restore_path):
        os.makedirs(restore_path)
    path = restore_path + '/' + model_name + '.ckpt'
    X = tf.placeholder(tf.float32, shape=(None, H_size, W_size, C_size), name='X')
    Y = tf.placeholder(tf.float32, shape=(None, classes), name='Y')
    logits = ResNet50_reference(X)
    loss = tf.reduce_mean(tf.losses.softmax_cross_entropy(onehot_labels=Y, logits=logits))
    # optimizer = tf.train.GradientDescentOptimizer(0.001)
    # optimizer = tf.train.MomentumOptimizer(0.1, momentum=0.9, use_locking=False, name='Momentum')
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = tf.train.AdamOptimizer(1e-3).minimize(loss, global_step=global_step)
    saver = tf.train.Saver()  # the Saver must be created in the right place (after the variables exist), otherwise the parameters cannot be saved
    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, axis=1), tf.argmax(Y, axis=1)), tf.float32))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        mini_batches = random_mini_batches(X_train, Y_train, mini_batch_size)
        ckpt = tf.train.get_checkpoint_state(restore_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            step = int(os.path.basename(ckpt.model_checkpoint_path).split('-')[1])
            print('model restored: ', 'pretraining step =', step)
        else:
            step = 0
        while step < max_step:
            X_mini_batch, Y_mini_batch = mini_batches[np.random.randint(0, len(mini_batches))]
            _, cost_sess = sess.run([train_op, loss], feed_dict={X: X_mini_batch, Y: Y_mini_batch})
            acc = sess.run(accuracy, feed_dict={X: X_train, Y: Y_train})
            if step % 50 == 0:
                print('step:', step, '|', cost_sess)
            if acc > best_acc:
            print('step:', step, '|', 'Saving...', 'accuracy: ', acc)
                saver.save(sess, path, global_step=global_step)
                best_acc = acc
            step = step + 1
        sess.run(tf.assign(TRAINING, False))
if __name__ == '__main__':
    main()
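
One detail worth double-checking when reproducing this (a hedged sketch, not a confirmed fix): the batch-normalization layers are driven by the module-level TRAINING variable, which the training script only flips to False after the loop finishes, and the prediction script never touches it after restoring the checkpoint. If single-image predictions through Foolbox are meant to run in inference mode, the flag could be switched explicitly before calling model.predictions, for example:

    # hedged sketch: in the first script, after saver.restore(...), switch batch norm
    # to inference mode (assumes resnet50ORL.TRAINING is the variable used to build
    # the restored graph)
    sess.run(tf.assign(resnet50ORL.TRAINING, False))
    print(np.argmax(model.predictions(x1)))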

0 Answers:

No answers yet.