如何使用Tensorflow模型和冻结图进行预测

时间:2020-04-21 00:33:37

标签: python keras tensorflow2.0

有人可以指导我如何使用此模型和冻结图进行预测吗?我还显示了生成的数据结构。我想给我的模型一个形状为(20,9)的自定义.csv文件,并从中获取预测。该示例不会来自先前生成的数据。

我已经附加了在伪造的数据上构建模型然后导出模型的代码。我还没弄清楚如何使用导出的冻结图或模型来获得预测。

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import os
import os.path as path
import glob
import pandas as pd
import re            # To match regular expression for extracting labels
import sklearn
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow import keras 
from tensorflow.keras import layers
from tensorflow.keras import backend as K
from tensorflow.keras.models import load_model
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
np.random.seed(1111)  # Fix the global NumPy RNG so the generated data and shuffles are reproducible.

def create_random_csv_files(fault_classes, number_of_files_in_each_class, directory="./random_data/"):
    """Create random (20, 9) CSV files for each fault class.

    Files are named ``<fault_class>_<###>.csv`` (1-based, zero-padded to 3
    digits) inside ``directory``.

    Args:
        fault_classes: Iterable of class-name strings (e.g. "Fault_1").
        number_of_files_in_each_class: Number of CSV files per class.
        directory: Output directory (default matches the original hard-coded
            path so downstream label-slicing keeps working).
    """
    # makedirs(exist_ok=True) instead of os.mkdir: re-running the script no
    # longer raises FileExistsError.
    os.makedirs(directory, exist_ok=True)
    for fault_class in fault_classes:
        for i in range(number_of_files_in_each_class):
            data = np.random.rand(180,).reshape(20, 9)
            # The original wrapped plain variables in eval(...); that was a
            # no-op and has been removed.
            file_name = os.path.join(directory, f"{fault_class}_{i + 1:03}.csv")
            np.savetxt(file_name, data, delimiter=",", comments="")
        print(f"{number_of_files_in_each_class} {fault_class} files created.")


def tf_data_generator(file_list, batch_size = 20):
    """Endless generator yielding (data, labels) batches read from CSV files.

    Args:
        file_list: Sequence of paths shaped like "./random_data/Fault_X_###.csv".
            When driven through ``tf.data.Dataset.from_generator(..., args=...)``
            the paths arrive as ``bytes``; both ``str`` and ``bytes`` are handled.
        batch_size: Number of CSV files per yielded batch.

    Yields:
        data: np.ndarray of shape (batch, 20, 9).
        labels: np.ndarray of integer class indices (0..3).
    """
    label_classes = ["Fault_1", "Fault_2", "Fault_3", "Fault_4"]
    i = 0
    while True:
        if i * batch_size >= len(file_list):
            # One full pass done: reshuffle in place and start over
            # (the generator is deliberately infinite).
            i = 0
            np.random.shuffle(file_list)
        else:
            file_chunk = file_list[i * batch_size:(i + 1) * batch_size]
            data = []
            labels = []
            for file in file_chunk:
                # Normalize bytes -> str so pandas and slicing behave uniformly.
                name = file.decode() if isinstance(file, bytes) else file
                # Pass the path directly instead of open(file, 'r'): pandas
                # closes the handle itself, so nothing leaks.
                temp = pd.read_csv(name, header=None)
                data.append(temp.values)
                # Characters 14:21 of "./random_data/Fault_X_###.csv" are the
                # class token, e.g. "Fault_2".  The original routed this
                # through eval(), tf.constant() and re.match(); since both
                # sides are exactly 7 characters this is a plain equality.
                class_token = name[14:21]
                for j, label_class in enumerate(label_classes):
                    if label_class == class_token:
                        labels.append(j)
            data = np.asarray(data).reshape(-1, 20, 9)
            labels = np.asarray(labels)
            yield data, labels
            i = i + 1


# Log library versions for reproducibility of the experiment.
print("numpy ==", np.__version__)
print("matplotlib== ", matplotlib.__version__)
print("pandas ==", pd.__version__)
# re.__version__ was removed in Python 3.11; fall back instead of crashing.
print("re ==", getattr(re, "__version__", "builtin"))
print("tensorflow ==", tf.__version__)
print("sklearn ==", sklearn.__version__)

# Generate 100 random CSV files (each of shape (20, 9)) per fault class
# under ./random_data/.
create_random_csv_files(["Fault_1", "Fault_2", "Fault_3", "Fault_4"], number_of_files_in_each_class = 100)

files = glob.glob("./random_data/*")
print("Total number of files: ", len(files))
print("Showing first 10 files...")
print(files[:10])

# Characters 14:21 of each path are the class token (e.g. "Fault_1");
# tf_data_generator relies on this fixed offset to derive labels.
print(files[0])
print(files[0][14:21])


# Smoke-test the plain Python generator before wiring it into tf.data.
check_data = tf_data_generator(files, batch_size = 10)


num = 0
for data, labels in check_data:
    print(data.shape, labels.shape)
    print(labels, "<--Labels")
    print()
    num = num + 1
    if num > 5: break  # the generator is infinite, so stop after 6 batches


# Wrap the generator in a tf.data.Dataset.  `args` are forwarded to the
# generator; note that string args arrive inside the generator as bytes.
batch_size = 15
dataset = tf.data.Dataset.from_generator(tf_data_generator,args= [files, batch_size],output_types = (tf.float32, tf.float32),
                                        output_shapes = ((None,20,9),(None,)))



# Pull a few batches to verify shapes and labels; break explicitly because
# the underlying generator never terminates.
num = 0
for data, labels in dataset:
    print(data.shape, labels.shape)
    print(labels)
    print()
    num = num + 1
    if num > 7: break






# Split the file list: 80% train / 20% test, then carve 15% of train off
# as validation.  Fixed random_state values keep the splits reproducible.
train_file_names, test_file_names = train_test_split(files, test_size = 0.2, random_state = 321)

train_file_names, validation_file_names = train_test_split(train_file_names, test_size = 0.15, random_state = 232)

print("Number of train_files:" ,len(train_file_names))
print("Number of validation_files:" ,len(validation_file_names))
print("Number of test_files:" ,len(test_file_names))

# Build one (endless) tf.data pipeline per split from the shared generator.
batch_size = 10
train_dataset = tf.data.Dataset.from_generator(tf_data_generator, args = [train_file_names, batch_size], 
                                      output_shapes = ((None,20,9),(None,)),
                                      output_types = (tf.float32, tf.float32))

validation_dataset = tf.data.Dataset.from_generator(tf_data_generator, args = [validation_file_names, batch_size],
                                           output_shapes = ((None,20,9),(None,)),
                                           output_types = (tf.float32, tf.float32))

test_dataset = tf.data.Dataset.from_generator(tf_data_generator, args = [test_file_names, batch_size],
                                     output_shapes = ((None,20,9),(None,)),
                                     output_types = (tf.float32, tf.float32))


# Small 1-D CNN classifier: (20, 9) window in -> softmax over 4 fault classes.
model = tf.keras.Sequential([
layers.Conv1D(32, 3, activation = "relu", input_shape = (20,9)),
layers.Conv1D(32, 3, activation = "relu"),
layers.GlobalMaxPooling1D(),
layers.Dense(16, activation = "relu"),
layers.Dense(4, activation = "softmax")
])
model.summary()

# Labels are integer class indices, hence sparse categorical cross-entropy.
model.compile(loss = "sparse_categorical_crossentropy", optimizer = "adam", metrics = ["accuracy"])

# The datasets are endless, so epochs are bounded by explicit step counts.
steps_per_epoch = 34
validation_steps = 6
steps = 10  # NOTE(review): unused — evaluate() below hard-codes steps=10

model.fit(train_dataset, validation_data = validation_dataset, steps_per_epoch = steps_per_epoch,
 validation_steps = validation_steps, epochs = 10)


test_loss, test_accuracy = model.evaluate(test_dataset, steps = 10)

print("Test loss: ", test_loss)
print("Test accuracy:", test_accuracy)

# Save model to SavedModel format.
# os.mkdir raised FileExistsError whenever the script was re-run; makedirs
# with exist_ok=True makes this step idempotent.
os.makedirs("./models/", exist_ok=True)
tf.saved_model.save(model, "./models")

# Convert Keras model to ConcreteFunction
# Convert the Keras model to a single ConcreteFunction by tracing it with
# the model's own input signature.
full_model = tf.function(lambda x: model(x))
full_model = full_model.get_concrete_function(
tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype))

# Freeze: inline every variable into the graph as a constant so the graph
# is fully self-contained.
frozen_func = convert_variables_to_constants_v2(full_model)
frozen_func.graph.as_graph_def()
# NOTE(review): for prediction the frozen function can be called directly,
# e.g. frozen_func(x=tf.constant(sample, dtype=tf.float32)) — confirm the
# input keyword against the "Frozen model inputs" printed below.


# Renamed from `layers` to `op_names`: the original rebound the imported
# tensorflow.keras.layers module to a plain list, shadowing the import.
op_names = [op.name for op in frozen_func.graph.get_operations()]
print("-" * 50)
print("Frozen model layers: ")
for layer in op_names:
    print(layer)


print("-" * 50)
print("Frozen model inputs: ")
print(frozen_func.inputs)
print("Frozen model outputs: ")
print(frozen_func.outputs)

# Save frozen graph from frozen ConcreteFunction to hard drive.
tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
                  logdir="./frozen_models",
                  name="frozen_graph.pb",
                  as_text=False)

0 个答案:

没有答案