
I have a problem with training my multi-input model. I built it with the following code:

def create_covn_layers(input_layer):
    # Shared stack of Conv -> ReLU -> MaxPool blocks used by both input branches
    covn00 = layers.Conv2D(32, (3, 3), input_shape=get_img_input_shape(True))(input_layer)
    covn01 = layers.Conv2D(32, (3, 3))(covn00)
    acti01 = layers.Activation('relu')(covn01)
    pool01 = layers.MaxPooling2D((2, 2))(acti01)
    covn02 = layers.Conv2D(64, (3, 3))(pool01)
    acti02 = layers.Activation('relu')(covn02)
    pool02 = layers.MaxPooling2D((2, 2))(acti02)
    covn03 = layers.Conv2D(128, (3, 3))(pool02)
    acti03 = layers.Activation('relu')(covn03)
    pool03 = layers.MaxPooling2D(pool_size=(2, 2), padding='same')(acti03)
    covn_base = layers.Dropout(0.2)(pool03)

    return covn_base



#flat = layers.Flatten()(pool03)
model_one_input = layers.Input(shape=get_img_input_shape(True))
model_one = create_covn_layers(model_one_input)

model_two_input = layers.Input(shape=get_img_input_shape(True))
model_two = create_covn_layers(model_two_input)

concat_feature_layer = layers.concatenate([model_one, model_two])
flatten_layer = layers.Flatten()(concat_feature_layer)
fully_connected_dense_big = layers.Dense(256, activation='relu')(flatten_layer)
dropout_one = layers.Dropout(0.3)(fully_connected_dense_big)
fully_connected_dense_small = layers.Dense(128, activation='relu')(dropout_one)
dropout_two = layers.Dropout(0.3)(fully_connected_dense_small)
output = layers.Dense(3, activation='softmax')(dropout_two)

model = Model(
    inputs=[model_one_input, model_two_input],
    outputs=output
)
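
I compile the model before calling fit. A simplified sketch of the compile call (the exact optimizer and loss may differ in my actual code; the metrics=['acc'] part is the one that matters, see the update at the end):

model.compile(
    optimizer='adam',                 # placeholder, the exact optimizer is not the issue
    loss='categorical_crossentropy',  # 3-class softmax output with one-hot labels
    metrics=['acc']                   # the metric mentioned in the update below
)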

The input layers accept the following shape:

batch_size = 18

def get_img_input_shape(for_model=False):
    if for_model:
        return (299, 299, 3)  # (height, width, channels) for the model's Input layers
    return (299, 299)         # (height, width) used as target_size in flow_from_dataframe
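
So the model's Input layers get the 3-channel shape, while the generators' target_size gets just (height, width):

print(get_img_input_shape(True))  # (299, 299, 3) -> shape for the Input layers
print(get_img_input_shape())      # (299, 299)    -> target_size for flow_from_dataframe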

The input layer shape: https://i.sstatic.net/AU6HU.png

The model structure:

https://i.sstatic.net/q0kI6.jpg

I have built a custom generator that takes two generators created with flow_from_dataframe and outputs two inputs and one label.

train_generator_one = ImageDataGenerator(
    rescale=1./255,
    validation_split=0.2
)

train_generator_two = ImageDataGenerator(
    rescale=1./255,
    validation_split=0.2
)

input_1_train_gen = train_generator_one.flow_from_dataframe(
    balanced_eeg_data,
    batch_size=batch_size, 
    target_size=get_img_input_shape(), 
    shuffle=False,
    color_mode="rgb",
    class_mode="categorical",
    subset="training")

input_2_train_gen = train_generator_two.flow_from_dataframe(
    balanced_ecg_data,
    batch_size=batch_size, 
    target_size=get_img_input_shape(), 
    shuffle=False,
    color_mode="rgb",
    class_mode="categorical",
    subset="training")

input_1_validation_gen = train_generator_one.flow_from_dataframe(
    balanced_eeg_data,
    batch_size=batch_size, 
    target_size=get_img_input_shape(), 
    shuffle=False,
    color_mode="rgb",
    class_mode="categorical",
    subset="validation")


input_2_validation_gen = train_generator_two.flow_from_dataframe(
    balanced_ecg_data,
    batch_size=batch_size, 
    target_size=get_img_input_shape(), 
    shuffle=False,
    color_mode="rgb",
    class_mode="categorical",
    subset="validation")

def create_data_generator(data_gen_one, data_gen_two):
    # Pull one batch from each generator and yield both image batches together
    # with a single set of labels (the two dataframes only differ in the paths).
    while True:
        _gen1, _gen1_l = next(data_gen_one)
        _gen2, _gen2_l = next(data_gen_two)

        yield [_gen1, _gen2], [_gen1_l]

multi_train_generator = create_data_generator(
    input_1_train_gen,
    input_2_train_gen
    )

multi_validation_generator = create_data_generator(
    input_1_validation_gen,
    input_2_validation_gen
    )
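
To sanity-check the combined generator before training, one batch can be pulled and its shapes printed (a quick sketch that just consumes one batch from the generators defined above):

x_batch, y_batch = next(multi_train_generator)
print(x_batch[0].shape, x_batch[1].shape)  # expected (18, 299, 299, 3) for each input
print(y_batch[0].shape)                    # expected (18, 3), one-hot labels for the 3 classes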

When I call model.fit, however, it raises an AttributeError:

history = model.fit(
    multi_train_generator,
    epochs=2,
    steps_per_epoch = input_1_train_gen.samples//batch_size, 
    validation_data=multi_validation_generator, 
    validation_steps = input_1_validation_gen.samples//batch_size,
)

---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
/var/folders/0v/m6wt8rqj7s1dcljdyjrdfxmw0000gn/T/ipykernel_84306/4129641024.py in <module>
----> 1 history = model.fit(
      2     multi_train_generator,
      3     epochs=2,
      4     steps_per_epoch = input_1_train_gen.samples//batch_size,
      5     validation_data=multi_validation_generator,

/usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
   1181                 _r=1):
   1182               callbacks.on_train_batch_begin(step)
-> 1183               tmp_logs = self.train_function(iterator)
   1184               if data_handler.should_sync:
   1185                 context.async_wait()

/usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
    887 
    888       with OptionalXlaContext(self._jit_compile):
--> 889         result = self._call(*args, **kwds)
    890 
    891       new_tracing_count = self.experimental_get_tracing_count()

/usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
    931       # This is the first call of __call__, so we have to initialize.
    932       initializers = []
--> 933       self._initialize(args, kwds, add_initializers_to=initializers)
    934     finally:
    935       # At this point we know that the initialization is complete (or less

/usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
    761     self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
    762     self._concrete_stateful_fn = (
--> 763         self._stateful_fn._get_concrete_function_internal_garbage_collected(  # pylint: disable=protected-access
    764             *args, **kwds))
    765 

/usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
   3048       args, kwargs = None, None
   3049     with self._lock:
-> 3050       graph_function, _ = self._maybe_define_function(args, kwargs)
   3051     return graph_function
   3052 

/usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
   3442 
   3443           self._function_cache.missed.add(call_context_key)
-> 3444           graph_function = self._create_graph_function(args, kwargs)
   3445           self._function_cache.primary[cache_key] = graph_function
   3446 

/usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
   3277     arg_names = base_arg_names + missing_arg_names
   3278     graph_function = ConcreteFunction(
-> 3279         func_graph_module.func_graph_from_py_func(
   3280             self._name,
   3281             self._python_function,

/usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
    997         _, original_func = tf_decorator.unwrap(python_func)
    998 
--> 999       func_outputs = python_func(*func_args, **func_kwargs)
   1000 
   1001       # invariant: `func_outputs` contains only Tensors, CompositeTensors,

/usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
    670         # the function a weak reference to itself to avoid a reference cycle.
    671         with OptionalXlaContext(compile_with_xla):
--> 672           out = weak_wrapped_fn().__wrapped__(*args, **kwds)
    673         return out
    674 

/usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
    984           except Exception as e:  # pylint:disable=broad-except
    985             if hasattr(e, "ag_error_metadata"):
--> 986               raise e.ag_error_metadata.to_exception(e)
    987             else:
    988               raise

AttributeError: in user code:

    /usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py:855 train_function  *
        return step_function(self, iterator)
    /usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py:845 step_function  **
        outputs = model.distribute_strategy.run(run_step, args=(data,))
    /usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/distribute/distribute_lib.py:1285 run
        return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
    /usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/distribute/distribute_lib.py:2833 call_for_each_replica
        return self._call_for_each_replica(fn, args, kwargs)
    /usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/distribute/distribute_lib.py:3608 _call_for_each_replica
        return fn(*args, **kwargs)
    /usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py:838 run_step  **
        outputs = model.train_step(data)
    /usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/keras/engine/training.py:800 train_step
        self.compiled_metrics.update_state(y, y_pred, sample_weight)
    /usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/keras/engine/compile_utils.py:439 update_state
        self.build(y_pred, y_true)
    /usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/keras/engine/compile_utils.py:361 build
        self._metrics = nest.map_structure_up_to(y_pred, self._get_metric_objects,
    /usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/util/nest.py:1374 map_structure_up_to
        return map_structure_with_tuple_paths_up_to(
    /usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/util/nest.py:1472 map_structure_with_tuple_paths_up_to
        results = [
    /usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/util/nest.py:1473 <listcomp>
        func(*args, **kwargs) for args in zip(flat_path_gen, *flat_value_gen)
    /usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/util/nest.py:1376 <lambda>
        lambda _, *values: func(*values),  # Discards the path arg.
    /usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/keras/engine/compile_utils.py:485 _get_metric_objects
        return [self._get_metric_object(m, y_t, y_p) for m in metrics]
    /usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/keras/engine/compile_utils.py:485 <listcomp>
        return [self._get_metric_object(m, y_t, y_p) for m in metrics]
    /usr/local/Caskroom/miniforge/base/envs/speciale_01_01/lib/python3.9/site-packages/tensorflow/python/keras/engine/compile_utils.py:506 _get_metric_object
        y_t_rank = len(y_t.shape.as_list())

    AttributeError: 'tuple' object has no attribute 'shape'

Can anyone help or point me to where the issue is?

The dataframes are identical except for the paths.

UPDATE: I found out that metrics=['acc'] is causing this problem... very annoying... However, why it fails I haven't found out yet.

1 Answer


So, for anyone else who runs into this, I have found the problem.

Note: this problem doesn't occur when using a Sequential model... I don't know why.

However, when the labels are one-hot encoded like below, as I did:

[image: one-hot encoding of the labels]

and you use a multi-input model with generators like:

[image: the custom multi-input generator]

then don't use metrics=['acc']: it does not work, and you will get the AttributeError.

See the following: https://www.tensorflow.org/api_docs/python/tf/keras/metrics/CategoricalAccuracy

Use tf.keras.metrics.CategoricalAccuracy instead; it works with one-hot encoded labels.
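
A minimal sketch of the compile call with the explicit metric object (the optimizer and loss here are placeholders, adjust them to your own setup):

import tensorflow as tf

model.compile(
    optimizer='adam',                                   # placeholder
    loss='categorical_crossentropy',                    # matches the one-hot labels and softmax output
    metrics=[tf.keras.metrics.CategoricalAccuracy()]    # instead of metrics=['acc']
)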
