Hey, I am new to TensorFlow (but not to neural nets in general). I have some images (25 x 25 pixels, RGB) and am trying to experiment with training a model in TensorFlow to categorize them. (No convolutional layers etc. for now, just some Dense layers.)

As far as I understand, I am doing something wrong with the formatting of the input. One input would be a 1-D NumPy array of the pixel data.

import tensorflow as tf
from PIL import Image, ImageFilter, ImageEnhance

from os import listdir, mkdir, remove, rmdir
from os.path import isfile, join, isdir
import numpy as np
import matplotlib.pyplot as plt

items = ["Coke_Zero", "Fanta2"]

sourcePath = "./generated"
resolution = 25

validationRatio = 0.9

(x_train_data, y_train_data), (x_val_data, y_val_data) = tf.keras.datasets.fashion_mnist.load_data()
print(type(x_train_data))


# Transforms an image's pixel data into a 1-dimensional list (interleaved R, G, B per pixel)
def imageToArray1(image):
    dataArray = []
    for pixel in list(image.getdata()):
        dataArray.append(pixel[0]/255)
        dataArray.append(pixel[1]/255)
        dataArray.append(pixel[2]/255)

    return dataArray

# Other way of transforming an image's pixel data into a 1-dimensional list (all red values, then all green, then all blue)
def imageToArray2(image):
    redChannel = []
    greenChannel = []
    blueChannel = []
    for pixel in list(image.getdata()):
        redChannel.append(pixel[0]/255)
        greenChannel.append(pixel[1]/255)
        blueChannel.append(pixel[2]/255)

    return redChannel + greenChannel + blueChannel


# Cast the pixel values to float32 (scaled to [0, 1]) and the labels to int64
def preprocessing_function(x_new, y_new):
  x_new = tf.cast(x_new, tf.float32) / 255.0
  y_new = tf.cast(y_new, tf.int64)

  return x_new, y_new


# Generate the training data by opening the images, reading their pixel data and attaching a label to each
def generateTrainingData():

    trainingDataX = []
    trainingDataY = []
    valDataX = []
    valDataY = []

    for j, item in enumerate(items):
        path = join(join(sourcePath,str(resolution)),item)

        images = listdir(path)

        

        threshhold = int(len(images) * validationRatio)

        for i, imagePath in enumerate(images):
            if(i%100==0):
                print(str(i)+ " of "+ str(len(images)))
            image = Image.open(join(path,imagePath))
            labeled = imageToArray1(image)
            output = items.index(item) 
            
            if(i>threshhold):
                valDataX.append(labeled)
                valDataY.append(output)
            else:
                trainingDataX.append(labeled)
                trainingDataY.append(output)

    #2D Numpy Array (Array of the 1D Input Arrays)
    trainingDataX = np.array(trainingDataX)
    
    #2D Numpy Array (Array of the 1D Labels for example: [1, 0])
    trainingDataY = tf.one_hot(np.array(trainingDataY),depth=len(items))

    #same as above for the validation data
    valDataX = np.array(valDataX)
    valDataY = tf.one_hot(np.array(valDataY),depth=len(items))
    
    # Create and return the training and validation datasets
    return (tf.data.Dataset.from_tensor_slices((trainingDataX,trainingDataY)),tf.data.Dataset.from_tensor_slices((valDataX,valDataY)))

# TensorFlow magic below

#layer setup
model = tf.keras.Sequential([
    tf.keras.layers.Dense(units=256, activation='relu', input_dim = resolution * resolution),
    #tf.keras.layers.Dense(units=192, activation='relu'),
    #tf.keras.layers.Dense(units=128, activation='relu'),
    tf.keras.layers.Dense(units=len(items), activation='sigmoid'),
])

model.compile(optimizer='adam', 
              loss=tf.keras.losses.BinaryCrossentropy(),
              metrics=['accuracy'])


dataset_training, dataset_val = generateTrainingData()

print(dataset_training)

history = model.fit(
    dataset_training.repeat(), 
    epochs=10, 
    steps_per_epoch=500,
    validation_data=dataset_val.repeat(), 
    validation_steps=2
)

Full Error:

Traceback (most recent call last):
  File "c:\Users\User\Desktop\projects\Uni\Projekt\trainNeuralNet.py", line 102, in <module>
    history = model.fit(
              ^^^^^^^^^^
  File "C:\Users\User\AppData\Local\Programs\Python\Python311\Lib\site-packages\keras\utils\traceback_utils.py", line 70, in error_handler
    raise e.with_traceback(filtered_tb) from None
  File "C:\Users\User\AppData\Local\Temp\__autograph_generated_filedxbn7ku9.py", line 15, in tf__train_function
    retval_ = ag__.converted_call(ag__.ld(step_function), (ag__.ld(self), ag__.ld(iterator)), None, fscope)
    ^^^^^
ValueError: in user code:

File "C:\Users\User\AppData\Local\Programs\Python\Python311\Lib\site-packages\keras\engine\training.py", line 1284, in train_function  *
    return step_function(self, iterator)
File "C:\Users\User\AppData\Local\Programs\Python\Python311\Lib\site-packages\keras\engine\training.py", line 1268, in step_function  **
    outputs = model.distribute_strategy.run(run_step, args=(data,))
File "C:\Users\User\AppData\Local\Programs\Python\Python311\Lib\site-packages\keras\engine\training.py", line 1249, in run_step  **
    outputs = model.train_step(data)
File "C:\Users\User\AppData\Local\Programs\Python\Python311\Lib\site-packages\keras\engine\training.py", line 1050, in train_step
    y_pred = self(x, training=True)
File "C:\Users\User\AppData\Local\Programs\Python\Python311\Lib\site-packages\keras\utils\traceback_utils.py", line 70, in error_handler
    raise e.with_traceback(filtered_tb) from None
File "C:\Users\user\AppData\Local\Programs\Python\Python311\Lib\site-packages\keras\engine\input_spec.py", line 253, in assert_input_compatibility
    raise ValueError(

ValueError: Exception encountered when calling layer 'sequential' (type Sequential).

Input 0 of layer "dense" is incompatible with the layer: expected min_ndim=2, found ndim=1. Full shape received: (1875,)

Call arguments received by layer 'sequential' (type Sequential):
  • inputs=tf.Tensor(shape=(1875,), dtype=float64)
  • training=True
  • mask=None

I'm just running the model and can't figure out what the problem is.

1 Answer

While your feature vector is 1-dimensional, a Dense layer expects at least a 2-dimensional input. This is because you normally pass values in the form (batch_size, input_size). You can read more about it in the TensorFlow documentation:

Input shape

N-D tensor with shape: (batch_size, ..., input_dim). The most common situation would be a 2D input with shape (batch_size, input_dim).
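
For example (a minimal illustration, not from the original code, using the 25 x 25 x 3 = 1875 feature length that appears in the error message):

import tensorflow as tf

layer = tf.keras.layers.Dense(units=256, activation='relu')

# A batched 2-D input of shape (batch_size, input_dim) is accepted
batched = tf.zeros((32, 1875))
print(layer(batched).shape)   # (32, 256)

# A single unbatched 1-D vector of shape (1875,) raises the same
# "expected min_ndim=2, found ndim=1" ValueError as in the question
# layer(tf.zeros((1875,)))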

So, batch your inputs (or otherwise add a leading batch dimension) so they match the 2-dimensional requirement of the first Dense layer and it should work.
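
A minimal sketch of how that could look with the tf.data pipeline from the question (the batch size of 32 is an arbitrary choice; generateTrainingData and model refer to the code above):

dataset_training, dataset_val = generateTrainingData()

# .batch() adds the leading batch dimension that Dense expects, turning
# dataset elements of shape (1875,) into batches of shape (32, 1875)
dataset_training = dataset_training.batch(32)
dataset_val = dataset_val.batch(32)

# Note: with RGB images flattened to 25 * 25 * 3 = 1875 values per image,
# the first Dense layer's input_dim would also have to be 1875 rather than
# resolution * resolution for the shapes to line up.

history = model.fit(
    dataset_training.repeat(),
    epochs=10,
    steps_per_epoch=500,
    validation_data=dataset_val.repeat(),
    validation_steps=2
)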
