InternalError: Unable to get element as bytes

Time: 2018-11-20 08:51:46

Tags: python tensorflow deep-learning jupyter-notebook

I am trying to run a DNNClassifier with TensorFlow on some log data that contains a mix of categorical and numeric data. I created feature columns to specify and store/hash the data for TensorFlow. When I run the code, I get an "Unable to get element as bytes" InternalError.
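The columns in the CSV are a mix of string and numeric types. A quick way to see which is which, before deciding between numeric_column and the categorical column types, is to inspect the DataFrame dtypes; the snippet below is only a minimal sketch and assumes the same Crimes_-_2001_to_present.csv used in the code that follows.

import pandas as pd

# Minimal sketch (assumes the same Crimes_-_2001_to_present.csv as below):
# list which columns pandas infers as numeric and which as object/string.
df = pd.read_csv('Crimes_-_2001_to_present.csv')
print(df.dtypes)

# Columns with dtype 'object' hold strings (dates, locations, booleans read
# as text) and generally cannot be fed to tf.feature_column.numeric_column
# as-is.
object_cols = df.select_dtypes(include=['object']).columns.tolist()
print('string-typed columns:', object_cols)

# Missing values in the input features are another common source of errors
# when feeding a DataFrame through pandas_input_fn.
print(df.isnull().sum())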

import pandas as pd
import tensorflow as tf
import matplotlib as plt
%matplotlib inline

diabetes = pd.read_csv('Crimes_-_2001_to_present.csv')
diabetes.head()
diabetes.columns
id = tf.feature_column.numeric_column('ID')
date_time = tf.feature_column.numeric_column('Date')   # grabbing feature column as numeric values; the rest are done the same way
dist = tf.feature_column.numeric_column('District')
#x_coord = tf.feature_column.numeric_column(key='X Coordinate',dtype=tf.float64)
#y_coord = tf.feature_column.numeric_column('Y Coordinate')
year = tf.feature_column.numeric_column('Year')
lat = tf.feature_column.numeric_column('Latitude')
long = tf.feature_column.numeric_column('Longitude')
loc = tf.feature_column.numeric_column('Location')
arrest = tf.feature_column.categorical_column_with_vocabulary_list('Arrest', ['True', 'False'])   # grabbing feature column as a vocabulary according to the defined list
#crime = tf.feature_column.categorical_column_with_hash_bucket('Primary Type', hash_bucket_size=40)   # grabbing feature column as a vocabulary according to the hash bucket size; the list is created automatically in the background
Dist_bucket = tf.feature_column.bucketized_column(dist, boundaries=[1, 10, 20, 30, 40])   # a bucketized column splits the feature column's values into categories based on the numeric ranges given in boundaries
feat_column = [id, date_time, Dist_bucket, year, lat, long, loc]   # gathering all feature columns
x_data = diabetes.drop('Arrest', axis=1)
labels = diabetes['Arrest']
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x_data, labels, test_size=0.3, random_state=101)


input_func = tf.estimator.inputs.pandas_input_fn(x=x_train, y=y_train,
                                                 batch_size=10, num_epochs=1000, shuffle=True)
model = tf.estimator.LinearClassifier(feature_columns=feat_column, n_classes=2)


model.train(input_fn = input_func, steps = 1000)


eval_input_func = tf.estimator.inputs.pandas_input_fn(x=x_test, y=y_test, batch_size=10, num_epochs=1, shuffle=False)

results = model.evaluate(eval_input_func)

results

Then I receive this error:

---------------------------------------------------------------------------
InternalError                             Traceback (most recent call last)
~\Anaconda3\envs\TensorFlow\lib\site-packages\tensorflow\python\client\session.py in _do_call(self, fn, *args)
1291     try:
-> 1292       return fn(*args)
1293     except errors.OpError as e:

~\Anaconda3\envs\TensorFlow\lib\site-packages\tensorflow\python\client\session.py in _run_fn(feed_dict, fetch_list, target_list, options, run_metadata)
 1276       return self._call_tf_sessionrun(
 -> 1277           options, feed_dict, fetch_list, target_list, run_metadata)
 1278 

~\Anaconda3\envs\TensorFlow\lib\site-packages\tensorflow\python\client\session.py in _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list, run_metadata)
1366         self._session, options, feed_dict, fetch_list, target_list,
-> 1367         run_metadata)
1368 

InternalError: Unable to get element as bytes.

During handling of the above exception, another exception occurred:

InternalError                             Traceback (most recent call last)
<ipython-input-8-9c9a6ee5b5b0> in <module>
  7 
  8 
 ----> 9 model.train(input_fn = input_func, steps = 1000)
 10 
 11 

 ~\Anaconda3\envs\TensorFlow\lib\site-packages\tensorflow\python\estimator\estimator.py in train(self, input_fn, hooks, steps, max_steps, saving_listeners)
 354 
 355       saving_listeners = _check_listeners_type(saving_listeners)
--> 356       loss = self._train_model(input_fn, hooks, saving_listeners)
 357       logging.info('Loss for final step: %s.', loss)
 358       return self

~\Anaconda3\envs\TensorFlow\lib\site-packages\tensorflow\python\estimator\estimator.py in _train_model(self, input_fn, hooks, saving_listeners)
1179       return self._train_model_distributed(input_fn, hooks, saving_listeners)
1180     else:
-> 1181       return self._train_model_default(input_fn, hooks, saving_listeners)
1182 
1183   def _train_model_default(self, input_fn, hooks, saving_listeners):

~\Anaconda3\envs\TensorFlow\lib\site-packages\tensorflow\python\estimator\estimator.py in _train_model_default(self, input_fn, hooks, saving_listeners)
1213       return self._train_with_estimator_spec(estimator_spec, worker_hooks,
1214                                              hooks, global_step_tensor,
-> 1215                                              saving_listeners)
1216 
1217   def _train_model_distributed(self, input_fn, hooks, saving_listeners):

~\Anaconda3\envs\TensorFlow\lib\site-packages\tensorflow\python\estimator\estimator.py in _train_with_estimator_spec(self, estimator_spec, worker_hooks, hooks, global_step_tensor, saving_listeners)
1407       loss = None
1408       while not mon_sess.should_stop():
-> 1409         _, loss = mon_sess.run([estimator_spec.train_op, estimator_spec.loss])
1410     return loss
1411 

~\Anaconda3\envs\TensorFlow\lib\site-packages\tensorflow\python\training\monitored_session.py in __exit__(self, exception_type, exception_value, traceback)
 781     if exception_type in [errors.OutOfRangeError, StopIteration]:
 782       exception_type = None
 --> 783     self._close_internal(exception_type)
784     # __exit__ should return True to suppress an exception.
785     return exception_type is None

~\Anaconda3\envs\TensorFlow\lib\site-packages\tensorflow\python\training\monitored_session.py in _close_internal(self, exception_type)
819         if self._sess is None:
820           raise RuntimeError('Session is already closed.')
--> 821         self._sess.close()
822       finally:
823         self._sess = None

~\Anaconda3\envs\TensorFlow\lib\site-packages\tensorflow\python\training\monitored_session.py in close(self)
1067     if self._sess:
1068       try:
-> 1069         self._sess.close()
1070       except _PREEMPTION_ERRORS:
1071         pass

~\Anaconda3\envs\TensorFlow\lib\site-packages\tensorflow\python\training\monitored_session.py in close(self)
1211       self._coord.join(
1212           stop_grace_period_secs=self._stop_grace_period_secs,
-> 1213           ignore_live_threads=True)
1214     finally:
1215       try:

~\Anaconda3\envs\TensorFlow\lib\site-packages\tensorflow\python\training\coordinator.py in join(self, threads, stop_grace_period_secs, ignore_live_threads)
387       self._registered_threads = set()
388       if self._exc_info_to_raise:
--> 389         six.reraise(*self._exc_info_to_raise)
390       elif stragglers:
391         if ignore_live_threads:

~\Anaconda3\envs\TensorFlow\lib\site-packages\six.py in reraise(tp, value, tb)
690                 value = tp()
691             if value.__traceback__ is not tb:
--> 692                 raise value.with_traceback(tb)
693             raise value
694         finally:

~\Anaconda3\envs\TensorFlow\lib\site-packages\tensorflow\python\estimator\inputs\queues\feeding_queue_runner.py in _run(self, sess, enqueue_op, feed_fn, coord)
 92         try:
 93           feed_dict = None if feed_fn is None else feed_fn()
 ---> 94           sess.run(enqueue_op, feed_dict=feed_dict)
 95         except (errors.OutOfRangeError, errors.CancelledError):
 96           # This exception indicates that a queue was closed.

~\Anaconda3\envs\TensorFlow\lib\site-packages\tensorflow\python\client\session.py in run(self, fetches, feed_dict, options, run_metadata)
885     try:
886       result = self._run(None, fetches, feed_dict, options_ptr,
--> 887                          run_metadata_ptr)
888       if run_metadata:
889         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

~\Anaconda3\envs\TensorFlow\lib\site-packages\tensorflow\python\client\session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
1108     if final_fetches or final_targets or (handle and feed_dict_tensor):
1109       results = self._do_run(handle, final_targets, final_fetches,
-> 1110                              feed_dict_tensor, options, run_metadata)
1111     else:
1112       results = []

~\Anaconda3\envs\TensorFlow\lib\site-packages\tensorflow\python\client\session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
1284     if handle is None:
1285       return self._do_call(_run_fn, feeds, fetches, targets, options,
-> 1286                            run_metadata)
1287     else:
1288       return self._do_call(_prun_fn, handle, feeds, fetches)

~\Anaconda3\envs\TensorFlow\lib\site-packages\tensorflow\python\client\session.py in _do_call(self, fn, *args)
1306           self._config.experimental.client_handles_error_formatting):
1307         message = error_interpolation.interpolate(message, self._graph)
-> 1308       raise type(e)(node_def, op, message)
1309 
1310   def _extend_graph(self):

InternalError: Unable to get element as bytes.

0 Answers:

There are no answers yet.