这是我的代码。我已经创建了训练集并保存了模型,现在想把这个模型封装成基于 Node 的 REST API。我尝试写 API 部分但没有成功,希望有人能帮我看看。
{{-- Inline delete form for a post: submits a POST to posts/{id} with a
     hidden _method=DELETE field (Laravel HTTP method spoofing). --}}
{!!Form::open(['url' => ['posts', $post->id], 'method' => 'POST', 'class' => 'pull-right'])!!}
{{Form::hidden('_method', 'DELETE')}}
{{Form::submit('Delete', ['class' => 'btn btn-danger'])}}
{!!Form::close()!!}
# Build the supervised training set: one (bag-of-words, one-hot label) pair
# per tokenized pattern in `documents`.
# NOTE(review): the original paste contained this entire section twice, with
# the two copies fused on a single line ("random.shuffle(training)training = []",
# a syntax error) and all indentation stripped. The duplicate has been removed,
# the statements separated, and the indentation restored. The unused `output`
# list was also dropped.
training = []
# Template one-hot vector: one slot per intent class.
output_empty = [0] * len(classes)
for doc in documents:
    # doc[0] is the list of tokenized words for the pattern; stem each word.
    pattern_words = [stemmer.stem(word.lower()) for word in doc[0]]
    # Bag of words: 1 for every vocabulary word present in this pattern.
    bag = [1 if w in pattern_words else 0 for w in words]
    # Output is a '0' for each tag and '1' for the current tag (doc[1]).
    output_row = list(output_empty)
    output_row[classes.index(doc[1])] = 1
    training.append([bag, output_row])
# Shuffle our features and turn into np.array.
random.shuffle(training)
training = np.array(training)
# Create train and test lists.
train_x = list(training[:, 0])
train_y = list(training[:, 1])
tf.reset_default_graph()
# Build the neural network: input layer sized to the bag-of-words vector,
# two 8-unit hidden layers, and a softmax output over the intent classes.
network = tflearn.input_data(shape=[None, len(train_x[0])])
for _ in range(2):
    network = tflearn.fully_connected(network, 8)
network = tflearn.fully_connected(network, len(train_y[0]), activation='softmax')
network = tflearn.regression(network)
# Wrap the graph in a trainable model, logging to TensorBoard.
model = tflearn.DNN(network, tensorboard_dir='tflearn_logs')
# Train with gradient descent and persist the weights to disk.
model.fit(train_x, train_y, n_epoch=4000, batch_size=8, show_metric=True)
model.save('model.tflearn')
# Save all of our data structures so inference can run without retraining.
import pickle

training_artifacts = {
    'words': words,
    'classes': classes,
    'train_x': train_x,
    'train_y': train_y,
}
# Use a context manager so the handle is flushed and closed — the original
# passed a bare open() whose file object was never closed.
with open("training_data", "wb") as dump_file:
    pickle.dump(training_artifacts, dump_file)
import pickle

# Restore the artifacts produced by the training run. The original used
# pickle.load(open(...)) and never closed the handle; a context manager
# fixes that. The indentation of the json `with` block is also restored.
with open("training_data", "rb") as data_file:
    data = pickle.load(data_file)
words = data['words']
classes = data['classes']
train_x = data['train_x']
train_y = data['train_y']
# Import our chat-bot intents file.
import json
with open('D:\\android\\ad.json') as json_data:
    intents = json.load(json_data)
def clean_up_sentence(sentence):
    """Tokenize *sentence* and reduce every token to its lower-cased stem."""
    return [stemmer.stem(token.lower()) for token in nltk.word_tokenize(sentence)]
# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence
def bow(sentence, words, show_details=False):
    """Return a 0/1 numpy vector: bag[i] is 1 iff words[i] occurs in *sentence*.

    When *show_details* is true, each match is echoed to stdout.
    """
    stemmed_tokens = clean_up_sentence(sentence)
    bag = [0] * len(words)
    for token in stemmed_tokens:
        for idx, vocab_word in enumerate(words):
            if vocab_word == token:
                bag[idx] = 1
                if show_details:
                    print ("found in bag: %s" % vocab_word)
    return np.array(bag)
# Presumably the minimum prediction confidence for a class to be reported —
# the code that consumes this constant is not in this chunk; verify usage.
ERROR_THRESHOLD = 0.25
答案 0(得分:0)
根据 Tflearn 文档,该库仍与 Tensorflow 兼容。谷歌发布了 Tensorflow.js,它既可以用作基于浏览器的 Javascript 库,也可以用作 NodeJS 的 Javascript 库。
可以将Tensorflow模型加载到Tensorflow.JS中,如链接中所述:
https://js.tensorflow.org/tutorials/import-saved-model.html
供参考;模型需要转换为TF.JS格式
-您需要先将Tensorflow.JS安装到您的Python环境中:
# Install the TensorFlow.js Python tooling (provides tensorflowjs_converter).
pip install tensorflowjs
-将现有的TensorFlow模型转换为TensorFlow.js Web格式
# Convert the SavedModel at /my/saved_model into the TF.js web format,
# writing the result to /my/web_model.
# --output_node_names must name the graph's actual output node(s).
# (Comments must stay above the command: the trailing backslashes below
# continue a single shell invocation.)
tensorflowjs_converter \
--input_format=tf_saved_model \
--output_node_names='Some/Model/Name' \
--saved_model_tags=serve \
/my/saved_model \
/my/web_model
在NodeJS环境中加载保存的模型:
// Load the converted model in Node.js from the local filesystem.
// NOTE(review): tf.loadModel was later deprecated in favour of
// tf.loadLayersModel — confirm against the installed @tensorflow/tfjs version.
const model = await tf.loadModel('file:///mypath/mymodel.json');
答案 1(得分:0)
有多种方法可以执行此操作。您可以使用Flask或Django等服务器框架。我将展示一个使用flask的简单示例:(请注意,这只是一个抽象原型)
创建模型类
import libraries
class Model ():
    """Thin wrapper that loads the trained model once and exposes prediction."""

    def __init__(self):
        # load() is the answer's placeholder for the real deserialization step.
        self.model = load()

    def inference(self, inpts):
        """Run the wrapped model's predict() on *inpts* and return the result."""
        # Bug fix: the original body referenced an undefined name `inputs`,
        # which raised NameError on every call. The declared parameter `inpts`
        # is what must be forwarded.
        return self.model.predict(inpts)
注意,这只是一个原型,功能由您实现。
创建REST端点
from flask import Flask, request, jsonify
from model import Model
# Use the module's real import name so Flask resolves resources relative to
# this module; hard-coding "__main__" is only correct when the file is run
# directly as a script.
app = Flask(__name__)
# Instantiate the model wrapper once at startup, not per request.
model = Model()
@app.route("/inference", methods =["POST"])
def inference():
    """POST endpoint: run model inference on the JSON body's 'inputs' field."""
    payload = request.get_json()
    prediction = model.inference(payload["inputs"])
    response_body = {"result" : prediction}
    return jsonify(response_body)
然后,您可以使用 curl 测试该端点,也可以使用 axios 或 fetch 向该端点发送 POST 请求。如果您要从其他域调用它,请不要忘记配置 CORS。谢谢。