仅在所有线程均抛出异常时,如何关闭Java executor服务

时间:2019-02-03 15:47:56

标签: java multithreading

我在Java应用程序中使用以下代码,它产生3个工作线程。我需要的是,如果一个线程失败(从运行中引发未处理的异常),其他线程应继续执行。如果我按以下方式调用executor.shutdown(),它将终止其他线程(不接受新任务)。我想到的选项之一是在ExecutionException catch块上设置一个计数器,仅在 counter == numConsumers 时调用shutdown。有更好的方法吗?

import tensorflow as tf
import pandas as pd

from tensorflow.estimator.inputs import pandas_input_fn
from tensorflow.estimator import DNNRegressor
from tensorflow.feature_column import numeric_column, indicator_column, categorical_column_with_vocabulary_list
from tensorflow.train import Feature, Features, BytesList, FloatList, Example
from tensorflow.python_io import TFRecordWriter

# Toy DataFrame: "a" is a numeric scalar, "b" is a variable-length list of
# strings, "label" is the regression target.
df = pd.DataFrame([
    [1, ["a", "b"], 10],
    [2, ["b"], 20],
], columns=["a", "b", "label"])


# Serialize each row into a tf.train.Example and write it to a TFRecord file.
# Using the writer as a context manager guarantees the file is flushed and
# closed even if serialization raises mid-loop (the original leaked the
# handle on error and carried an unused `label_values` list).
with TFRecordWriter("test.tfrecord") as writer:
    for _, row in df.iterrows():
        dict_feature = {}
        # Series.items() replaces the deprecated Series.iteritems().
        for name, value in row.items():
            if name == "a":
                # Fixed-length numeric feature.
                dict_feature[name] = Feature(float_list=FloatList(value=[value]))
            elif name == "b":
                # Variable-length string feature: each element must be bytes.
                dict_feature[name] = Feature(
                    bytes_list=BytesList(value=[m.encode('utf-8') for m in value]))
            elif name == "label":
                dict_feature[name] = Feature(float_list=FloatList(value=[value]))

        example = Example(features=Features(feature=dict_feature))
        writer.write(example.SerializeToString())


def parse_tfrecords(example_proto):
    """Deserialize one serialized tf.train.Example into (features, label).

    "a" and "label" are fixed-length float32 scalars; "b" is a
    variable-length string feature (parsed into a SparseTensor).
    Returns a ({"a": ..., "b": ...} dict, label) pair for the Estimator API.
    """
    schema = {
        "a": tf.FixedLenFeature(shape=[], dtype=tf.float32),
        "b": tf.VarLenFeature(dtype=tf.string),
        "label": tf.FixedLenFeature(shape=[], dtype=tf.float32),
    }

    parsed = tf.parse_single_example(example_proto, schema)
    # Splitting the label out leaves exactly the "a"/"b" feature dict.
    label = parsed.pop("label")
    return parsed, label

def tf_record_input_fn(filenames_pattern):
    """Build an Estimator input_fn reading tf.train.Examples from TFRecords.

    The returned closure shuffles with a 128-element buffer, parses each
    record via parse_tfrecords, and yields batches of 128.
    """

    def _input_fn():
        # Chain the pipeline stages; each dataset op returns a new dataset.
        return (tf.data.TFRecordDataset(filenames=filenames_pattern)
                .shuffle(buffer_size=128)
                .map(map_func=parse_tfrecords)
                .batch(batch_size=128))

    return _input_fn


# Feature columns describing how parsed tensors feed the model:
# "a" is consumed directly as a dense float; "b" is a variable-length
# categorical feature, multi-hot encoded via indicator_column.
feature_columns = [
    numeric_column("a"),
    indicator_column(categorical_column_with_vocabulary_list("b", vocabulary_list=['a', 'b']))
]
# Minimal DNN regressor (one hidden unit) — just enough to exercise the pipeline.
estimator = DNNRegressor(feature_columns=feature_columns, hidden_units=[1])
train_input_fn = tf_record_input_fn("test.tfrecord")
# Next line does not work
# train_input_fn = tf.estimator.inputs.pandas_input_fn(x=df[["a", "b"]], y=df.label, shuffle=True)
estimator.train(train_input_fn)

0 个答案:

没有答案