Tracking down TensorFlow 'InvalidArgumentError: Max scatter index must be < array size'

Asked: 2019-09-04 04:33:08

Tags: python tensorflow keras

I am working on a CNN involving custom layers in Keras. My goal is to have a layer that lets the user generate a unique kernel for each image in a batch and apply each kernel to its corresponding image. I got the convolution layer working using some code I stripped out of the Keras source and reworked.
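For context, the per-image behavior I'm after is equivalent to this tf.map_fn sketch (illustrative only; the names here are made up, and my real layer is below):

import tensorflow as tf

def per_image_conv(images, kernels):
    # images:  (batch, H, W, C_in)
    # kernels: (batch, kH, kW, C_in, C_out) -- one kernel per image
    def conv_one(pair):
        image, kernel = pair
        # conv2d needs a batch dimension, so add one of size 1 and strip it
        return tf.nn.conv2d(image[None], kernel,
                            strides=[1, 1, 1, 1], padding='VALID')[0]
    return tf.map_fn(conv_one, (images, kernels), dtype=images.dtype)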

The problem arises when I try to apply a TimeDistributed layer to this convolution layer. TimeDistributed only takes a single tensor input, so I wrote my own AuxTimeDistributed (code below), which allows one to time-distribute multiple tensors. (I'm skipping things like masking for now.)
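To illustrate the limitation (a toy example, not my real model): the stock wrapper happily distributes a single tensor, but there is no way to hand it two aligned time-distributed inputs:

from keras.layers import TimeDistributed, Conv2D, Input

x = Input(shape=(10, 100, 100, 5))
y = TimeDistributed(Conv2D(32, (3, 3)))(x)  # fine: one tensor in, one out

# But TimeDistributed(my_layer)([images, kernels]) is not supported,
# hence AuxTimeDistributed below.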

The trouble comes when I try to wrap the convolution layer twice (which my application requires). The final output is this error:

tensorflow.python.framework.errors_impl.InvalidArgumentError: Max scatter index must be < array size (779 vs. 100)
     [[{{node aux_time_distributed_3/TensorArrayUnstack_1/TensorArrayScatter/TensorArrayScatterV3}}]]
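The failing op is a TensorArray scatter. I can trigger the same check in isolation with a toy repro (assuming TF 1.x graph mode; the sizes here are made up):

import tensorflow as tf

# Unstacking 5 elements into a TensorArray of fixed size 2 hits the same check
ta = tf.TensorArray(dtype=tf.float32, size=2, dynamic_size=False)
ta = ta.unstack(tf.zeros([5]))
with tf.Session() as sess:
    sess.run(ta.stack())
# InvalidArgumentError: Max scatter index must be < array size (4 vs. 2)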

Here is my current setup:

from keras.layers import TimeDistributed, Input, Layer
from keras.models import Model
import keras.backend as K

import numpy as np

import tensorflow as tf
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils.generic_utils import has_arg
from tensorflow.python.keras.utils.generic_utils import is_all_none
from tensorflow.python.keras.utils.generic_utils import object_list_uid
from tensorflow.python.keras.utils.generic_utils import to_list

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util import nest

def batch_apply(step_function,
        inputs,
        aux_inputs,
        batch_size=None,
        constants=None,
        input_length=None):
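    # Apply step_function to each (input, aux_input) pair along the leading
    # batch dimension: unrolled when batch_size is known, otherwise via
    # TensorArrays and a while_loop.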
    flatted_inputs = nest.flatten(inputs)
    flatted_aux = nest.flatten(aux_inputs)
    time_steps = flatted_inputs[0].shape[0]
    time_steps_t = flatted_inputs[0].shape[1]
    batch_steps_t = array_ops.shape(flatted_inputs[0])[0]

    if constants is None:
        constants = []

    if batch_size is not None:
        successive_outputs = []

        inp_shape = inputs.shape.as_list()
        aux_shape = aux_inputs.shape.as_list()
        inp_shape[0] = batch_size
        aux_shape[0] = batch_size
        list_inputs = array_ops.unstack(tf.reshape(inputs, inp_shape))
        list_aux_inputs = array_ops.unstack(tf.reshape(aux_inputs, aux_shape))

        for i in range(batch_size):
            inp = list_inputs[i]
            aux = list_aux_inputs[i]
            output = step_function(inp, aux, tuple(constants))
            successive_outputs.append(output)
        outputs = array_ops.stack(successive_outputs)

        # static shape inference
        def set_shape(output_):
            if isinstance(output_, ops.Tensor):
                shape = output_.shape.as_list()
                shape[0] = batch_size
                output_.set_shape(shape)
            return output_

        outputs = nest.map_structure(set_shape, outputs)
    else:
        def prep_input(flatted_inputs):
            input_ta = tuple(
                tensor_array_ops.TensorArray(
                    dtype=inp.dtype, size=time_steps_t,
                    tensor_array_name='input_ts_%s' % i)
                for i, inp in enumerate(flatted_inputs))
            input_ta = tuple(ta.unstack(input_)
                for ta, input_ in zip(input_ta, flatted_inputs))
            return input_ta
        input_ta = prep_input(flatted_inputs)
        input_au = prep_input(flatted_aux)

        input_time_zero = nest.pack_sequence_as(inputs,
            [inp[0] for inp in flatted_inputs])
        aux_time_zero = nest.pack_sequence_as(aux_inputs,
            [inp[0] for inp in flatted_aux])
        output_time_zero = step_function(input_time_zero, aux_time_zero,
            tuple(constants))

        output_ta = tuple(
            tensor_array_ops.TensorArray(dtype=out.dtype, size=batch_steps_t,
                tensor_array_name='output_ta_%s' % i)
            for i, out in enumerate(nest.flatten(output_time_zero)))


        while_loop_kwargs = {'cond': lambda time, *_: time < batch_steps_t,
            'maximum_iterations': input_length, 'parallel_iterations': 32,
            'swap_memory': True}

        time = constant_op.constant(0, dtype='int32', name='time')

        def _step(time, output_ta_t):
            current_input = tuple(ta.read(time) for ta in input_ta)
            current_input = nest.pack_sequence_as(inputs, current_input)
            current_aux = tuple(au.read(time) for au in input_au)
            current_aux = nest.pack_sequence_as(aux_inputs, current_aux)

            output = step_function(current_input, current_aux, constants)
            flat_output = nest.flatten(output)
            output_ta_t = tuple(ta.write(time, out)
                for ta, out in zip(output_ta_t, flat_output))
            return (time + 1, output_ta_t)

        final_outputs = control_flow_ops.while_loop(
            body=_step, loop_vars=(time, output_ta),
            **while_loop_kwargs
        )

        outputs = tuple(o.stack() for o in final_outputs[1])
        outputs = nest.pack_sequence_as(output_time_zero, outputs)

    return outputs

def unpack_singleton(x):
    if len(x) == 1:
        return x[0]
    return x

def collect_previous_mask(input_tensors):
    input_tensors = to_list(input_tensors)
    masks = []
    for x in input_tensors:
        if hasattr(x, '_keras_history'):
            inbound_layer, node_index, tensor_index = x._keras_history
            node = inbound_layer._inbound_nodes[node_index]
            mask = node.output_masks[tensor_index]
            masks.append(mask)
        else:
            masks.append(None)
    return unpack_singleton(masks)

def collect_input_shape(input_tensors):
    input_tensors = to_list(input_tensors)
    shapes = []
    for x in input_tensors:
        try:
            shapes.append(K.int_shape(x))
        except TypeError:
            shapes.append(None)
    return unpack_singleton(shapes)
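For reference, batch_apply can be exercised on its own; here is a quick sanity check with made-up shapes (illustrative only):

import numpy as np
import tensorflow as tf

imgs = tf.constant(np.random.rand(4, 8, 8, 3), dtype=tf.float32)
kers = tf.constant(np.random.rand(4, 3, 3, 3, 16), dtype=tf.float32)

def step(img, ker, constants):
    # Convolve one image with its own kernel (batch dim of 1 added/stripped)
    return tf.nn.conv2d(img[None], ker,
                        strides=[1, 1, 1, 1], padding='VALID')[0]

out = batch_apply(step, imgs, kers)  # expected shape: (4, 6, 6, 16)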

AuxTimeDistributed Layer:

class AuxTimeDistributed(TimeDistributed):
    def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape[i])
            for i in range(len(input_shape))]
        child_input_shape = [(input_shape[i][0],) + tuple(input_shape[i][2:])
            for i in range(len(input_shape))]
        if not self.layer.built:
            self.layer.build(child_input_shape)
            self.layer.built = True
        super(TimeDistributed, self).build()

    def compute_output_shape(self, input_shapes):
        child_input_shapes = [(input_shapes[i][0],) + tuple(input_shapes[i][2:])
            for i in range(len(input_shapes))]
        child_output_shape = self.layer.compute_output_shape(child_input_shapes)
        timesteps = input_shapes[0][1]

        output_shape = (child_output_shape[0], timesteps) + child_output_shape[1:]
        return output_shape

    def compute_mask(self, inputs, previous_mask):
        return None

    def call(self, inputs, training=None):
        kwargs = {}
        if has_arg(self.layer.call, 'training'):
            kwargs['training'] = training
        uses_learning_phase = False

        def for_inputs(func):
            return [func(i) for i in range(len(inputs))]

        input_shapes = for_inputs(lambda i: K.int_shape(inputs[i]))

        if input_shapes[0][0]:
            # If given a batch size, use the tf rnn function
            def step(x, _):
                global uses_learning_phase
                output = self.layer.call(x, **kwargs)
                if hasattr(output, '_uses_learning_phase'):
                    uses_learning_phase = (output._uses_learning_phase or
                                           uses_learning_phase)
                return output, []

            _, outputs, _ = K.rnn(step, inputs,
                                  initial_states=[],
                                  input_length=input_shapes[0][1],
                                  unroll=False)
            y = outputs
        else:
            # No batch size specified, therefore the layer will be able
            # to process batches of any size.
            # We can go with reshape-based implementation for performance.
            input_lengths = for_inputs(lambda i: input_shapes[i][1])

            for i in range(len(input_lengths)):
                if not input_lengths[i]:
                    input_lengths[i] = K.shape(inputs[i])[1]

            def get_shapes(i):
                return self._get_shape_tuple((-1,), inputs[i], 2)
            inner_shapes = for_inputs(get_shapes)

            # Shape: (num_samples * timesteps, ...). And track the
            # transformation in self._input_map.
            input_uids = for_inputs(lambda i: object_list_uid(inputs[i]))
            inputs = for_inputs(lambda i: K.reshape(inputs[i], inner_shapes[i]))

            def assign_uid(i):
                self._input_map[input_uids[i]] = inputs[i]
            for_inputs(assign_uid)

            # (num_samples * timesteps, ...)
            y = self.layer.call(inputs, **kwargs)

            if hasattr(y, '_uses_learning_phase'):
                uses_learning_phase = y._uses_learning_phase
            # Shape: (num_samples, timesteps, ...)
            output_shape = self.compute_output_shape(input_shapes)
            output_shape = self._get_shape_tuple(
                (-1, input_lengths[0]), y, 1, output_shape[2:])
            y = K.reshape(y, output_shape)

        # Apply activity regularizer if any:
        if (hasattr(self.layer, 'activity_regularizer') and
           self.layer.activity_regularizer is not None):
            regularization_loss = self.layer.activity_regularizer(y)
            self.add_loss(regularization_loss, inputs)

        if uses_learning_phase:
            y._uses_learning_phase = True
        return y

Kernel convolution layer:

class KernelConv(Layer):
    def __init__(self,
                rank=2,
                strides=1,
                padding='valid',
                data_format=None,
                dilation_rate=1,
                **kwargs):
        super(KernelConv, self).__init__(**kwargs)
        self.rank = rank
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = K.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank,
            'dilation_rate')
        self.input_spec = [InputSpec(ndim=self.rank+2),
            InputSpec(ndim=self.rank+3)]

    def build(self, input_shapes):
        self.built = True

    def call(self, inputs):
        images, kernels = inputs

        def conv_func(image_t, kernel_t, constants):
            # Reshape image data to batch size 1
            shape = (1,) + tuple(image_t.shape.as_list())
            image_t = tf.reshape(image_t, shape)

            # Use the backend conv2d function
            return K.conv2d(
                image_t,
                kernel_t,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate
            )[0]

        # Apply the convolution function to the batch of (image, aux) pairs
        return batch_apply(conv_func, images, kernels)

    def compute_output_shape(self, input_shapes):
        image_shape, kernel_shape = input_shapes

        if self.data_format == 'channels_last':
            space = image_shape[1:-1]
            filters = kernel_shape[-1]
            k = 1
        elif self.data_format == 'channels_first':
            space = image_shape[2:]
            filters = kernel_shape[1]
            k = 2
        new_space = []
        for i in range(len(space)):
            new_dim = conv_utils.conv_output_length(
                space[i],
                kernel_shape[i+k],
                padding=self.padding,
                stride=self.strides[i],
                dilation=self.dilation_rate[i])
            new_space.append(new_dim)
        if self.data_format == 'channels_last':
            output_shape = (image_shape[0],) + tuple(new_space) + (filters,)
        elif self.data_format == 'channels_first':
            output_shape = (image_shape[0], filters) + tuple(new_space)

        return output_shape
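As a quick check on the shape math (using the same conv_utils helper the layer uses):

from tensorflow.python.keras.utils import conv_utils

# A 100-wide dimension convolved with a 3-wide kernel, 'valid' padding,
# stride 1 -> 98, matching the (98, 98) spatial shapes used below.
print(conv_utils.conv_output_length(100, 3, padding='valid', stride=1))  # 98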

The layer works as a simple convolution layer:

images = Input(shape=(100, 100, 5))
kernels = Input(shape=(3, 3, 5, 32))
output = KernelConv(rank=2)([images, kernels])
model = Model(inputs=[images, kernels], outputs=[output])

img = np.random.rand(6, 100, 100, 5)
krn = np.random.rand(6, 3, 3, 5, 32)
out = np.random.rand(6, 98, 98, 32)

model.compile('adam', loss='mape')
print("can compile")

model.predict([img, krn])
print("can predict")

model.fit([img, krn], out)
print("can fit")

I can wrap it once like this:

images = Input(shape=(10, 100, 100, 5))
kernels = Input(shape=(10, 3, 3, 5, 32))
output = AuxTimeDistributed(KernelConv(rank=2))([images, kernels])
model = Model(inputs=[images, kernels], outputs=[output])

img = np.random.rand(6, 10, 100, 100, 5)
krn = np.random.rand(6, 10, 3, 3, 5, 32)
out = np.random.rand(6, 10, 98, 98, 32)

model.compile('adam', loss='mape')
print("can compile")

model.predict([img, krn])
print("can predict")

model.fit([img, krn], out)
print("can fit")

But when I wrap it twice, I get:

images = Input(shape=(13, 10, 100, 100, 5))
kernels = Input(shape=(13, 10, 3, 3, 5, 32))
output = AuxTimeDistributed(AuxTimeDistributed(
    KernelConv(rank=2)))([images, kernels])
model = Model(inputs=[images, kernels], outputs=[output])

img = np.random.rand(6, 13, 10, 100, 100, 5)
krn = np.random.rand(6, 13, 10, 3, 3, 5, 32)
out = np.random.rand(6, 13, 10, 98, 98, 32)

model.compile('adam', loss='mape')
print("can compile")

model.predict([img, krn])
print("can predict")

model.fit([img, krn], out)
print("can fit")
can compile
Traceback (most recent call last):
  File "minimal_time_dist.py", line 360, in <module>
    model.predict([img, krn])
  File "/home/parallels/anaconda3/lib/python3.7/site-packages/keras/engine/training.py", line 1169, in predict
    steps=steps)
  File "/home/parallels/anaconda3/lib/python3.7/site-packages/keras/engine/training_arrays.py", line 294, in predict_loop
    batch_outs = f(ins_batch)
  File "/home/parallels/anaconda3/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py", line 2715, in __call__
    return self._call(inputs)
  File "/home/parallels/anaconda3/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py", line 2675, in _call
    fetched = self._callable_fn(*array_vals)
  File "/home/parallels/anaconda3/lib/python3.7/site-packages/tensorflow/python/client/session.py", line 1458, in __call__
    run_metadata_ptr)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Max scatter index must be < array size (779 vs. 100)
     [[{{node aux_time_distributed_3/TensorArrayUnstack_1/TensorArrayScatter/TensorArrayScatterV3}}]]

The only information I could find about this error is here, on line 1115.

For what it's worth, 779 is exactly 6 * 13 * 10 - 1 (one less than my batch and both time dimensions flattened together), while 100 matches the spatial size of my images, which makes me suspect a TensorArray is being sized from the wrong dimension. Please help explain why this error occurs and how I can fix it. Thanks!

Edit: I am running in a Linux (Ubuntu 16.04) VM via Parallels on macOS 10.14.6.

0 Answers:

No answers yet.