Problem with the evaluation function in TensorFlow Federated

Time: 2019-04-04 08:08:02

Tags: tensorflow-federated

I am trying to re-implement the github tutorial with my own CNN-based Keras model, but an error occurs at evaluation time.

from __future__ import absolute_import, division, print_function


import collections
from six.moves import range
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow_federated import python as tff

# Assumed import: the snippet uses nest.map_structure below but does not show
# where `nest` comes from; in TF 1.x it is available as tf.contrib.framework.nest.
nest = tf.contrib.framework.nest

emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data()

example_dataset = emnist_train.create_tf_dataset_for_client(
    emnist_train.client_ids[0])

NUM_EPOCHS = 10
BATCH_SIZE = 20
SHUFFLE_BUFFER = 500


def preprocess(dataset):

  def element_fn(element):
    return collections.OrderedDict([
        ('x', tf.reshape(element['pixels'], [-1])),
        ('y', tf.reshape(element['label'], [1])),
    ])

  return dataset.repeat(NUM_EPOCHS).map(element_fn).shuffle(
      SHUFFLE_BUFFER).batch(BATCH_SIZE)

preprocessed_example_dataset = preprocess(example_dataset)

sample_batch = nest.map_structure(
    lambda x: x.numpy(), iter(preprocessed_example_dataset).next())
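
# Note: this sample batch is later passed to tff.learning.from_compiled_keras_model
# (in model_fn below), which uses it to infer the model's input types and shapes.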


def make_federated_data(client_data, client_ids):
  return [preprocess(client_data.create_tf_dataset_for_client(x))
          for x in client_ids]

NUM_CLIENTS = 3

sample_clients = emnist_train.client_ids[0:NUM_CLIENTS]

federated_train_data = make_federated_data(emnist_train, sample_clients)

len(federated_train_data), federated_train_data[0]

def create_compiled_keras_model():

  model = tf.keras.models.Sequential([
      tf.keras.layers.Reshape((28,28,1), input_shape=(784,)),
      tf.keras.layers.Conv2D(32, kernel_size=(5,5), activation="relu", padding = "same", strides = 1),
      tf.keras.layers.MaxPooling2D(pool_size=2, strides=2, padding='valid'),
      tf.keras.layers.Conv2D(64, kernel_size=(5,5), activation="relu", padding = "same", strides = 1),
      tf.keras.layers.MaxPooling2D(pool_size=2, strides=2, padding='valid'),
      tf.keras.layers.Flatten(),
      tf.keras.layers.Dense(512, activation="relu"),
      tf.keras.layers.Dense(10, activation="softmax"),
    ])

  def loss_fn(y_true, y_pred):
    return tf.reduce_mean(tf.keras.losses.sparse_categorical_crossentropy(
        y_true, y_pred))

  model.compile(
      loss=loss_fn,
      optimizer=gradient_descent.SGD(learning_rate=0.02),
      metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

  return model

def model_fn():
  keras_model = create_compiled_keras_model()
  return tff.learning.from_compiled_keras_model(keras_model, sample_batch)


iterative_process = tff.learning.build_federated_averaging_process(model_fn)

state = iterative_process.initialize()

for round_num in range(1,10):
  state, metrics = iterative_process.next(state, federated_train_data)
  print('round {:2d}, metrics={}'.format(round_num, metrics))


## Evaluation of the model
# This does not work; it fails with the error shown in the traceback below.
evaluation = tff.learning.build_federated_evaluation(model_fn)

federated_test_data = make_federated_data(emnist_test, sample_clients)

test_metrics = evaluation(state.model, federated_test_data)

I expected this to evaluate the test data, but the actual output is the following error:

---------------------------------------------------------------------------
_FallbackException                        Traceback (most recent call last)
/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/gen_functional_ops.py in stateful_partitioned_call(args, Tout, f, config, config_proto, executor_type, name)
    482         "Tout", Tout, "f", f, "config", config, "config_proto", config_proto,
--> 483         "executor_type", executor_type)
    484       return _result

_FallbackException: This function does not handle the case of the path where all inputs are not already EagerTensors.

During handling of the above exception, another exception occurred:

AttributeError                            Traceback (most recent call last)
<ipython-input-23-6e9c77f70201> in <module>()
----> 1 evaluation = tff.learning.build_federated_evaluation(model_fn)

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/learning/federated_evaluation.py in build_federated_evaluation(model_fn)
     83   @tff.federated_computation(
     84       tff.FederatedType(model_weights_type, tff.SERVER, all_equal=True),
---> 85       tff.FederatedType(tff.SequenceType(batch_type), tff.CLIENTS))
     86   def server_eval(server_model_weights, federated_dataset):
     87     client_outputs = tff.federated_map(

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/core/impl/computation_wrapper.py in <lambda>(fn)
    406         args = (args,)
    407       arg_type = computation_types.to_type(args[0])
--> 408       return lambda fn: _wrap(fn, arg_type, self._wrapper_fn)

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/core/impl/computation_wrapper.py in _wrap(fn, parameter_type, wrapper_fn)
     94       function_utils.wrap_as_zero_or_one_arg_callable(fn, parameter_type),
     95       parameter_type,
---> 96       name=fn_name)
     97   py_typecheck.check_type(concrete_fn, function_utils.ConcreteFunction,
     98                           'value returned by the wrapper')

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/core/impl/computation_wrapper_instances.py in _federated_computation_wrapper_fn(target_fn, parameter_type, name)
     52           parameter_type,
     53           ctx_stack,
---> 54           suggested_name=name))
     55   return computation_impl.ComputationImpl(target_lambda.proto, ctx_stack)
     56 

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/core/impl/federated_computation_utils.py in zero_or_one_arg_fn_to_building_block(fn, parameter_name, parameter_type, context_stack, suggested_name)
     73           value_impl.ValueImpl(
     74               computation_building_blocks.Reference(
---> 75                   parameter_name, parameter_type), context_stack))
     76     else:
     77       result = fn()

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/core/impl/function_utils.py in <lambda>(arg)
    551       # and to force any parameter bindings to be resolved now.
    552       # pylint: disable=unnecessary-lambda,undefined-variable
--> 553       return (lambda fn, at, kt: lambda arg: _unpack_and_call(fn, at, kt, arg))(
    554           fn, arg_types, kwarg_types)
    555       # pylint: enable=unnecessary-lambda,undefined-variable

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/core/impl/function_utils.py in _unpack_and_call(fn, arg_types, kwarg_types, arg)
    545                                 name, str(expected_type), str(actual_type)))
    546           kwargs[name] = element_value
--> 547         return fn(*args, **kwargs)
    548 
    549       # Deliberate wrapping to isolate the caller from the underlying function

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/learning/federated_evaluation.py in server_eval(server_model_weights, federated_dataset)
     88         client_eval,
     89         [tff.federated_broadcast(server_model_weights), federated_dataset])
---> 90     return model.federated_output_computation(client_outputs.local_outputs)
     91 
     92   return server_eval

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/learning/model_utils.py in federated_output_computation(self)
    531   @property
    532   def federated_output_computation(self):
--> 533     return self._model.federated_output_computation
    534 
    535 

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow_federated/python/learning/model_utils.py in federated_output_computation(self)
    406   def federated_output_computation(self):
    407     metric_variable_type_dict = nest.map_structure(tf.TensorSpec.from_tensor,
--> 408                                                    self.report_local_outputs())
    409     federated_local_outputs_type = tff.FederatedType(
    410         metric_variable_type_dict, tff.CLIENTS, all_equal=False)

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
    314     if not self._created_variables:
    315       # If we did not create any variables the trace we have is good enough.
--> 316       return self._concrete_stateful_fn._filtered_call(canon_args, canon_kwds)  # pylint: disable=protected-access
    317 
    318     def fn_with_cond(*inner_args, **inner_kwds):

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/eager/function.py in _filtered_call(self, args, kwargs)
    382     """
    383     return self._call_flat(
--> 384         (t for t in nest.flatten((args, kwargs))
    385          if isinstance(
    386              t, (ops.Tensor, resource_variable_ops.ResourceVariable))))

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/eager/function.py in _call_flat(self, args)
    431     # Only need to override the gradient in graph mode and when we have outputs.
    432     if context.executing_eagerly() or not self.outputs:
--> 433       outputs = self._inference_function.call(ctx, args)
    434     else:
    435       if not self._gradient_name:

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/eager/function.py in call(self, ctx, args)
    267           executing_eagerly=executing_eagerly,
    268           config=function_call_options.config_proto_serialized,
--> 269           executor_type=function_call_options.executor_type)
    270 
    271     if executing_eagerly:

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/functional_ops.py in partitioned_call(args, f, tout, executing_eagerly, config, executor_type)
   1081       outputs = gen_functional_ops.stateful_partitioned_call(
   1082           args=args, Tout=tout, f=f, config_proto=config,
-> 1083           executor_type=executor_type)
   1084     else:
   1085       outputs = gen_functional_ops.partitioned_call(

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/gen_functional_ops.py in stateful_partitioned_call(args, Tout, f, config, config_proto, executor_type, name)
    487         return stateful_partitioned_call_eager_fallback(
    488             args, Tout=Tout, f=f, config=config, config_proto=config_proto,
--> 489             executor_type=executor_type, name=name, ctx=_ctx)
    490       except _core._SymbolicException:
    491         pass  # Add nodes to the TensorFlow graph.

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/gen_functional_ops.py in stateful_partitioned_call_eager_fallback(args, Tout, f, config, config_proto, executor_type, name, ctx)
    548     executor_type = ""
    549   executor_type = _execute.make_str(executor_type, "executor_type")
--> 550   _attr_Tin, args = _execute.convert_to_mixed_eager_tensors(args, _ctx)
    551   _inputs_flat = list(args)
    552   _attrs = ("Tin", _attr_Tin, "Tout", Tout, "f", f, "config", config,

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/eager/execute.py in convert_to_mixed_eager_tensors(values, ctx)
    207 def convert_to_mixed_eager_tensors(values, ctx):
    208   v = [ops.internal_convert_to_tensor(t, ctx=ctx) for t in values]
--> 209   types = [t._datatype_enum() for t in v]  # pylint: disable=protected-access
    210   return types, v
    211 

/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/eager/execute.py in <listcomp>(.0)
    207 def convert_to_mixed_eager_tensors(values, ctx):
    208   v = [ops.internal_convert_to_tensor(t, ctx=ctx) for t in values]
--> 209   types = [t._datatype_enum() for t in v]  # pylint: disable=protected-access
    210   return types, v
    211 

AttributeError: 'Tensor' object has no attribute '_datatype_enum'

2 Answers:

Answer 0 (score: 1):

Nuria: this should have been fixed earlier today. If you don't want to wait for the next release (coming soon), I would suggest building a local pip package from source. You can find instructions in the install guide.
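
For reference, a quick way to check which tensorflow-federated build is installed (a minimal sketch, not part of the original answer; it relies on pkg_resources from setuptools):

# Minimal sketch: print the installed tensorflow-federated version.
# Builds older than the one containing this fix will still raise the AttributeError above.
import pkg_resources
print(pkg_resources.get_distribution('tensorflow-federated').version)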

Answer 1 (score: 1):

As a follow-up, TFF 0.4.0 has now been released, which contains this bug fix.
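
A minimal sketch of the evaluation step from the question on TFF 0.4.0 or later (same names as defined above; assuming only the package upgrade, no code changes):

# With the fixed release installed, building and invoking the federated evaluation
# over the sampled test clients should succeed and return aggregate metrics.
evaluation = tff.learning.build_federated_evaluation(model_fn)
federated_test_data = make_federated_data(emnist_test, sample_clients)
test_metrics = evaluation(state.model, federated_test_data)
print(test_metrics)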