How to speed up training

Time: 2019-03-19 04:34:32

Tags: tensorflow keras conv-neural-network transfer-learning

I am new to TensorFlow and I am trying to use transfer learning for feature extraction. I have a large image dataset of 600k images stored in a 100k gzip-compressed HDF5 file. I am using a generator to load the images into a VGG16 model, but a single epoch would take more than 2000 hours to complete. Is there any way to optimize the code to speed up training?

NAME = "vgg16-CNN"
tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))

gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.75)
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True,gpu_options=gpu_options))

#Model
num_classes = 58
image_input = Input(shape=(224, 224, 3))
model = VGG16(input_tensor=image_input, include_top=True, weights='imagenet')
output_vgg16_conv = model.get_layer('fc2').output  # 4096-d output of VGG16's second fully connected layer
x = Dense(num_classes, activation='softmax', name='predictions')(output_vgg16_conv)
pretrained_model = Model(inputs=image_input, outputs=x)

# Freeze every layer except the new softmax classifier
for layer in pretrained_model.layers[:-1]:
    layer.trainable = False

pretrained_model.compile(loss='categorical_crossentropy',
                         optimizer='adam', metrics=['accuracy'])
pretrained_model.summary()

#Generator: yields one (image, one-hot label) pair at a time from the HDF5 file
def generator():
    extendable_hdf5_file = h5py.File('npx_train.hdf5', 'r')['dataset']
    y_train = pd.read_csv('train.csv')['Category']
    len_class = 58
    y_train = to_categorical(np.array(y_train), len_class)
    for a, im in enumerate(extendable_hdf5_file):
        yield (im, y_train[a])

#Dataset from generator
ds = tf.data.Dataset.from_generator(
    generator, 
    (tf.float32, tf.float32), 
    ((224,224,3),(58,)))
ds = ds.prefetch(tf.contrib.data.AUTOTUNE)
ds = ds.batch(10)
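
As a side note, batching before prefetching, and moving the /255 scaling into the pipeline (the updated loader further down applies X/255 in NumPy), usually keeps the GPU better fed. A minimal variant of the pipeline above, where the parallelism and batch size are assumptions rather than values from the original code:

ds = tf.data.Dataset.from_generator(
    generator,
    (tf.float32, tf.float32),
    ((224, 224, 3), (58,)))
ds = ds.map(lambda im, label: (im / 255.0, label), num_parallel_calls=4)  # scale on the fly; 4 parallel calls is an assumption
ds = ds.batch(32)                           # assumed batch size; tune to GPU memory
ds = ds.prefetch(tf.contrib.data.AUTOTUNE)  # stage whole batches ahead of the GPU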

#Training
with sess:
    sess.run(tf.global_variables_initializer())
    pretrained_model.fit(ds, epochs=10, steps_per_epoch=66662,
                         verbose=1, callbacks=[tensorboard], workers=0)
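
Because every layer except the classifier is frozen, most of each epoch goes into the VGG16 forward pass and into reading and decompressing the gzip-compressed HDF5 file, so it can help to time the raw reads on their own. A rough, self-contained timing loop, using the file and dataset names from the code above (the number of sampled batches is an assumption):

import time
import h5py
import numpy as np

# Time raw batch reads from the gzip-compressed HDF5 file, with no model in the loop
with h5py.File('npx_train.hdf5', 'r') as f:
    data = f['dataset']
    batch_size = 32
    n_batches = 100  # sample only; assumption
    start = time.time()
    for i in range(n_batches):
        batch = np.float32(data[i * batch_size:(i + 1) * batch_size]) / 255
    elapsed = time.time() - start
    print('%.1f images/s read from disk' % (n_batches * batch_size / elapsed))

If this rate is far below what the GPU can consume, the bottleneck is I/O and decompression rather than the model itself.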

Update: By passing the generator directly to model.fit, I managed to reduce the training time to 60 hours per epoch.

hdf5_path = "npx_train.hdf5"
extendable_hdf5_file = h5py.File(hdf5_path,'r')['dataset']
def train_loader(files,y_train, batch_size):
    L = 553292

    while True:

        batch_start = 0
        batch_end = batch_size

        while batch_start < L:
            limit = min(batch_end, L)
            X = files[batch_start:limit]
            X = X/255
            X = np.float32(X)
            Y = y_train[batch_start:limit]

            yield (X,Y)

            batch_start += batch_size   
            batch_end += batch_size

with tf.device('/gpu:0'):
    # val_loader (not shown) is assumed to yield validation batches in the same (X, Y) format
    pretrained_model.fit_generator(generator=train_loader(extendable_hdf5_file, y_train, 32),
                                   steps_per_epoch=16666, epochs=10, verbose=1, callbacks=[tensorboard],
                                   validation_data=val_loader(extendable_hdf5_file, y_train, 32),
                                   validation_steps=4167, workers=0)

However, training this single layer still takes a very long time. I would appreciate any help speeding up the process. GPU: GTX 1070
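
Since every layer up to and including fc2 is frozen, one common way to cut the per-epoch cost drastically is to run the frozen VGG16 base over the images once, cache the 4096-dimensional fc2 features, and then train only the small softmax head on those cached features. A minimal sketch of that idea, assuming the tf.keras API; the cache file name and batch size are hypothetical:

import numpy as np
import h5py
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model

# Frozen feature extractor: VGG16 up to the 4096-d 'fc2' layer
base = VGG16(include_top=True, weights='imagenet')
feature_extractor = Model(inputs=base.input, outputs=base.get_layer('fc2').output)

# One pass over the HDF5 images to cache the fc2 features
# (553292 x 4096 float32 is roughly 9 GB; write to an HDF5 dataset instead if RAM is tight)
with h5py.File('npx_train.hdf5', 'r') as f:
    images = f['dataset']
    n = images.shape[0]
    features = np.empty((n, 4096), dtype=np.float32)
    batch_size = 256  # assumption; tune to GPU memory
    for start in range(0, n, batch_size):
        stop = min(start + batch_size, n)
        batch = np.float32(images[start:stop]) / 255
        features[start:stop] = feature_extractor.predict(batch)
np.save('fc2_features.npy', features)  # hypothetical cache file

# The only trainable part is the 58-way softmax head, trained directly on the cached features
clf_input = Input(shape=(4096,))
clf_output = Dense(58, activation='softmax', name='predictions')(clf_input)
classifier = Model(inputs=clf_input, outputs=clf_output)
classifier.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# classifier.fit(features, y_train, batch_size=256, epochs=10, callbacks=[tensorboard])

After the one-time extraction pass, each epoch only pushes the cached features through a single Dense layer, which should take seconds rather than hours on a GTX 1070.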

0 Answers:

No answers