Keras saved model predicts different values in different sessions

Time: 2018-04-03 07:54:26

Tags: machine-learning nlp keras

I have trained a named entity recognition model. After saving and loading it, it gives correct predictions within the same IPython session, but whenever I close the session and open it again, the loaded model's predictions are random. Can you help me?

I saved the model in HDF5 format using:

model.save("filename")

and I am loading it with:

model = load_model("filename")

Here is my full code:

import pandas as pd
import numpy as np
import os
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from keras.models import Model, load_model
from keras.layers import Input, LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional
from nltk import pos_tag, word_tokenize, sent_tokenize



data = pd.read_csv(r"E:\ml tut\entity recognition\exdataset.csv", encoding="latin1")
data = data.fillna(method="ffill")
words = list(set(data["Word"].values))
words.append("ENDPAD")
n_words = len(words); n_words

tags = list(set(data["Tag"].values))
n_tags = len(tags); n_tags

class SentenceGetter(object):

    def __init__(self, data):
        self.n_sent = 1
        self.data = data
        self.empty = False
        agg_func = lambda s: [((w, p), t) for w, p, t in
                              zip(s["Word"].values.tolist(),
                                  s["POS"].values.tolist(),
                                  s["Tag"].values.tolist())]
        self.grouped = self.data.groupby("Sentence #").apply(agg_func)
        self.sentences = [s for s in self.grouped]

    def get_next(self):
        try:
            s = self.grouped["Sentence: {}".format(self.n_sent)]
            self.n_sent += 1
            return s
        except:
            return None

getter = SentenceGetter(data)

sent = getter.get_next()
print(sent)

sentences = getter.sentences

max_len = 50
word2idx = {w: i for i, w in enumerate(words)}
tag2idx = {t: i for i, t in enumerate(tags)}





input = Input(shape=(max_len,))
model = Embedding(input_dim=n_words, output_dim=50, input_length=max_len)(input)

model = Dropout(0.1)(model)

model = Bidirectional(LSTM(units=100, return_sequences=True, recurrent_dropout=0.1))(model)

out = TimeDistributed(Dense(n_tags, activation="softmax"))(model)  

if os.path.exists('my_model.h5'):
    print("loading model")
    model = load_model('my_model.h5')
else:
    print("training model")
    X = [[word2idx[w[0][0]] for w in s] for s in sentences]
    X = pad_sequences(maxlen=max_len, sequences=X, padding="post", value=n_words - 1)
    y = [[tag2idx[w[1]] for w in s] for s in sentences]
    y = pad_sequences(maxlen=max_len, sequences=y, padding="post", value=tag2idx["O"])
    y = [to_categorical(i, num_classes=n_tags) for i in y]
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.1)
    model = Model(input, out)
    model.compile(optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"])
    model.fit(X_tr, np.array(y_tr), batch_size=32, epochs=5, validation_split=0.1, verbose=1)
    model.save('my_model.h5')



    my_input="Albert Einstein is a great guy,he lives in berlin, Germany."
    print("--------------") 

    test_sentence = word_tokenize(my_input)
    x_test_sent = pad_sequences(sequences=[[word2idx.get(w, 0) for w in 
    test_sentence]],padding="post", value=0, maxlen=max_len)
    i = 0
    p = model.predict(np.array([x_test_sent[i]]))
    p = np.argmax(p, axis=-1)
    print("{:15}||{}".format("Word", "Prediction"))
    print(30 * "=")
    for w, pred in zip(test_sentence, p[0]):
    if w != 0:
         print("{:15}: {}".format(w, tags[pred]))

2 Answers:

Answer 0 (score: 1):

Please save your tags (tags = list(set(data["Tag"].values))) when you build the model; that will fix your problem. The order of list(set(...)) is not stable across Python sessions, so the word and tag indices come out different every time these lists are rebuilt, and the loaded model's outputs are then decoded with the wrong mapping.

So you need to save the following:

1. tags
2. model
3. word2idx
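A minimal sketch of what saving these could look like, assuming pickle is used and with illustrative file names (my_model.h5 matches the question; tags.pkl and word2idx.pkl are placeholders):

import pickle

# after training: persist the model together with both lookup structures,
# since tags and word2idx depend on the unstable ordering of list(set(...))
model.save("my_model.h5")
with open("tags.pkl", "wb") as f:
    pickle.dump(tags, f)
with open("word2idx.pkl", "wb") as f:
    pickle.dump(word2idx, f)

In a new session, reload all three instead of rebuilding tags and word2idx from the CSV.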

Answer 1 (score: 0):

import pandas as pd
import numpy as np
import os
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from keras.models import Model, load_model
from keras.layers import Input, LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional
from nltk import pos_tag, word_tokenize, sent_tokenize



data = pd.read_csv(r"E:\ml tut\entity recognition\exdataset.csv", encoding="latin1")
data = data.fillna(method="ffill")
words = list(set(data["Word"].values))
words.append("ENDPAD")
n_words = len(words); n_words

tags = list(set(data["Tag"].values))

Save your tags here, in pickle or any other format.
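For example (a sketch; "tags.pkl" is an illustrative file name):

import pickle

with open("tags.pkl", "wb") as f:  # persist the exact tag ordering for later sessions
    pickle.dump(tags, f)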
n_tags = len(tags); n_tags

class SentenceGetter(object):

    def __init__(self, data):
        self.n_sent = 1
        self.data = data
        self.empty = False
        agg_func = lambda s: [((w, p), t) for w, p, t in
                              zip(s["Word"].values.tolist(),
                                  s["POS"].values.tolist(),
                                  s["Tag"].values.tolist())]
        self.grouped = self.data.groupby("Sentence #").apply(agg_func)
        self.sentences = [s for s in self.grouped]

    def get_next(self):
        try:
            s = self.grouped["Sentence: {}".format(self.n_sent)]
            self.n_sent += 1
            return s
        except:
            return None

getter = SentenceGetter(data)

sent = getter.get_next()
print(sent)

sentences = getter.sentences

max_len = 50
word2idx = {w: i for i, w in enumerate(words)}

Save your word2idx here, in pickle or any other format.
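For example (again a sketch with an illustrative file name):

import pickle

with open("word2idx.pkl", "wb") as f:  # persist the exact word-to-index mapping
    pickle.dump(word2idx, f)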

tag2idx = {t: i for i, t in enumerate(tags)}





input = Input(shape=(max_len,))
model = Embedding(input_dim=n_words, output_dim=50, input_length=max_len)(input)

model = Dropout(0.1)(model)

model = Bidirectional(LSTM(units=100, return_sequences=True, recurrent_dropout=0.1))(model)

out = TimeDistributed(Dense(n_tags, activation="softmax"))(model)  

if os.path.exists('my_model.h5'):
    print("loading model")
    model = load_model('my_model.h5')
else:
    print("training model")
    X = [[word2idx[w[0][0]] for w in s] for s in sentences]
    X = pad_sequences(maxlen=max_len, sequences=X, padding="post", value=n_words - 1)
    y = [[tag2idx[w[1]] for w in s] for s in sentences]
    y = pad_sequences(maxlen=max_len, sequences=y, padding="post", value=tag2idx["O"])
    y = [to_categorical(i, num_classes=n_tags) for i in y]
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.1)
    model = Model(input, out)
    model.compile(optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"])
    model.fit(X_tr, np.array(y_tr), batch_size=32, epochs=5, validation_split=0.1, verbose=1)
    model.save('my_model.h5')



    my_input="Albert Einstein is a great guy,he lives in berlin, Germany."
    print("--------------") 

    test_sentence = word_tokenize(my_input)
    x_test_sent = pad_sequences(sequences=[[word2idx.get(w, 0) for w in 
    test_sentence]],padding="post", value=0, maxlen=max_len)
    i = 0
    p = model.predict(np.array([x_test_sent[i]]))
    p = np.argmax(p, axis=-1)
    print("{:15}||{}".format("Word", "Prediction"))
    print(30 * "=")
    for w, pred in zip(test_sentence, p[0]):
    if w != 0:
         print("{:15}: {}".format(w, tags[pred]))