I use Google Colab, and when I run the training code I get this error:
ValueError Traceback (most recent call last)
<ipython-input-...> in <module>()
4 model_trainer.setModelTypeAsResNet()
5 model_trainer.setDataDirectory("/content/idenprof/v_data")
----> 6 model_trainer.trainModel(num_objects=10, num_experiments=200, enhance_data=True, batch_size=32, show_network_summary=True)
5 frames
/usr/local/lib/python3.6/dist-packages/imageai/Prediction/Custom/__init__.py in trainModel(self, num_objects, num_experiments, enhance_data, batch_size, initial_learning_rate, show_network_summary, training_image_size, continue_from_model, transfer_from_model, transfer_with_full_training, initial_num_objects, save_full_model)
340 model.fit_generator(train_generator, steps_per_epoch=int(num_train / batch_size), epochs=self.__num_epochs,
341 validation_data=test_generator,
--> 342 validation_steps=int(num_test / batch_size), callbacks=[checkpoint, lr_scheduler, tensorboard])
343
344
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, validation_freq, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
1294 shuffle=shuffle,
1295 initial_epoch=initial_epoch,
-> 1296 steps_name='steps_per_epoch')
1297
1298 def evaluate_generator(self,
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/training_generator.py in model_iteration(model, data, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, validation_freq, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch, mode, batch_size, steps_name, **kwargs)
263
264 is_deferred = not model._is_compiled
--> 265 batch_outs = batch_function(*batch_data)
266 if not isinstance(batch_outs, list):
267 batch_outs = [batch_outs]
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/training.py in train_on_batch(self, x, y, sample_weight, class_weight, reset_metrics)
989 x, y, sample_weights = self._standardize_user_data(
990 x, y, sample_weight=sample_weight, class_weight=class_weight,
--> 991 extract_tensors_from_dataset=True)
992
993 # If self._distribution_strategy is True, then we are in a replica context
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, batch_size, check_steps, steps_name, steps, validation_split, shuffle, extract_tensors_from_dataset)
2535 # Additional checks to avoid users mistakenly using improper loss fns.
2536 training_utils.check_loss_and_target_compatibility(
-> 2537 y, self._feed_loss_fns, feed_output_shapes)
2538
2539 # If sample weight mode has not been set and weights are None for all the
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/training_utils.py in check_loss_and_target_compatibility(targets, loss_fns, output_shapes)
739 raise ValueError('A target array with shape ' + str(y.shape) +
740 ' was passed for an output of shape ' + str(shape) +
--> 741 ' while using as loss ' + loss_name + '. '
742 'This loss expects targets to have the same shape '
743 'as the output.')
ValueError: A target array with shape (32, 11) was passed for an output of shape (None, 10) while using as loss categorical_crossentropy. This loss expects targets to have the same shape as the output.
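A note on the likely cause (this is an assumption, not something stated in the traceback): the target shape (32, 11) means the image generator one-hot encoded 11 classes, i.e. it found 11 class subfolders in the training directory, while trainModel(num_objects=10) built the network with a 10-unit output layer. A quick sanity check, assuming the usual ImageAI layout of train/ and test/ subfolders under the data directory:

```python
import os

# Hypothetical check: count the class subfolders the data generator will see.
data_dir = "/content/idenprof/v_data"

for split in ("train", "test"):
    split_dir = os.path.join(data_dir, split)
    classes = sorted(
        d for d in os.listdir(split_dir)
        if os.path.isdir(os.path.join(split_dir, d))
    )
    print(split, "->", len(classes), "classes:", classes)
```

If train/ lists 11 folders (for example an extra or leftover directory next to the 10 idenprof classes), either remove the extra folder or pass num_objects=11 so the model's final layer matches what the generator produces.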